diff --git a/.changes/1.35.67.json b/.changes/1.35.67.json new file mode 100644 index 0000000000..fb331f9357 --- /dev/null +++ b/.changes/1.35.67.json @@ -0,0 +1,112 @@ +[ + { + "category": "``apigateway``", + "description": "Added support for custom domain names for private APIs.", + "type": "api-change" + }, + { + "category": "``application-autoscaling``", + "description": "Application Auto Scaling now supports Predictive Scaling to proactively increase the desired capacity ahead of predicted demand, ensuring improved availability and responsiveness for customers' applications. This feature is currently only made available for Amazon ECS Service scalable targets.", + "type": "api-change" + }, + { + "category": "``appsync``", + "description": "Add support for the Amazon Bedrock Runtime.", + "type": "api-change" + }, + { + "category": "``ce``", + "description": "This release introduces three new APIs that enable you to estimate the cost, coverage, and utilization impact of Savings Plans you plan to purchase. The three APIs are StartCommitmentPurchaseAnalysis, GetCommitmentPurchaseAnalysis, and ListCommitmentPurchaseAnalyses.", + "type": "api-change" + }, + { + "category": "``cloudfront``", + "description": "Adds support for Origin Selection between EMPv2 origins based on media quality score.", + "type": "api-change" + }, + { + "category": "``cloudtrail``", + "description": "This release introduces new APIs for creating and managing CloudTrail Lake dashboards. It also adds support for resource-based policies on CloudTrail EventDataStore and Dashboard resource.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Adds support for requesting future-dated Capacity Reservations with a minimum commitment duration, enabling IPAM for organizational units within AWS Organizations, reserving EC2 Capacity Blocks that start in 30 minutes, and extending the end date of existing Capacity Blocks.", + "type": "api-change" + }, + { + "category": "``elasticache``", + "description": "Added support to modify the engine type for existing ElastiCache Users and User Groups. Customers can now modify the engine type from redis to valkey.", + "type": "api-change" + }, + { + "category": "``elbv2``", + "description": "This feature adds support for enabling zonal shift on cross-zone enabled Application Load Balancer, as well as modifying HTTP request and response headers.", + "type": "api-change" + }, + { + "category": "``health``", + "description": "Adds metadata property to an AffectedEntity.", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "General Availability (GA) release of AWS IoT Device Management - Commands, to trigger light-weight remote actions on targeted devices", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "AWS IoT FleetWise now includes campaign parameters to store and forward data, configure MQTT topic as a data destination, and collect diagnostic trouble code data. 
It includes APIs for network agnostic data collection using custom decoding interfaces, and monitoring the last known state of vehicles.", + "type": "api-change" + }, + { + "category": "``iot-jobs-data``", + "description": "General Availability (GA) release of AWS IoT Device Management - Commands, to trigger light-weight remote actions on targeted devices", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Adds support for metrics for event source mappings for AWS Lambda", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Adds \"Create field indexes to improve query performance and reduce scan volume\" and \"Transform logs during ingestion\". Updates documentation for \"PutLogEvents with Entity\".", + "type": "api-change" + }, + { + "category": "``notifications``", + "description": "This release adds support for AWS User Notifications. You can now configure and view notifications from AWS services in a central location using the AWS SDK.", + "type": "api-change" + }, + { + "category": "``notificationscontacts``", + "description": "This release adds support for AWS User Notifications Contacts. You can now configure and view email contacts for AWS User Notifications using the AWS SDK.", + "type": "api-change" + }, + { + "category": "``resiliencehub``", + "description": "AWS Resilience Hub's new summary view visually represents applications' resilience through charts, enabling efficient resilience management. It provides a consolidated view of the app portfolio's resilience state and allows data export for custom stakeholder reporting.", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API.", + "type": "api-change" + }, + { + "category": "``ssm``", + "description": "Added support for providing high-level overviews of managed nodes and previewing the potential impact of a runbook execution.", + "type": "api-change" + }, + { + "category": "``ssm-quicksetup``", + "description": "Add methods that retrieve details about deployed configurations: ListConfigurations, GetConfiguration", + "type": "api-change" + }, + { + "category": "``xray``", + "description": "AWS X-Ray introduces Transaction Search APIs, enabling span ingestion into CloudWatch Logs for high-scale trace data indexing. These APIs support span-level queries, trace graph generation, and metric correlation for deeper application insights.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 460e232492..41eb14a3c8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,33 @@ CHANGELOG ========= +1.35.67 +======= + +* api-change:``apigateway``: Added support for custom domain names for private APIs. +* api-change:``application-autoscaling``: Application Auto Scaling now supports Predictive Scaling to proactively increase the desired capacity ahead of predicted demand, ensuring improved availability and responsiveness for customers' applications. This feature is currently only made available for Amazon ECS Service scalable targets. +* api-change:``appsync``: Add support for the Amazon Bedrock Runtime. +* api-change:``ce``: This release introduces three new APIs that enable you to estimate the cost, coverage, and utilization impact of Savings Plans you plan to purchase. 
The three APIs are StartCommitmentPurchaseAnalysis, GetCommitmentPurchaseAnalysis, and ListCommitmentPurchaseAnalyses. +* api-change:``cloudfront``: Adds support for Origin Selection between EMPv2 origins based on media quality score. +* api-change:``cloudtrail``: This release introduces new APIs for creating and managing CloudTrail Lake dashboards. It also adds support for resource-based policies on CloudTrail EventDataStore and Dashboard resource. +* api-change:``ec2``: Adds support for requesting future-dated Capacity Reservations with a minimum commitment duration, enabling IPAM for organizational units within AWS Organizations, reserving EC2 Capacity Blocks that start in 30 minutes, and extending the end date of existing Capacity Blocks. +* api-change:``elasticache``: Added support to modify the engine type for existing ElastiCache Users and User Groups. Customers can now modify the engine type from redis to valkey. +* api-change:``elbv2``: This feature adds support for enabling zonal shift on cross-zone enabled Application Load Balancer, as well as modifying HTTP request and response headers. +* api-change:``health``: Adds metadata property to an AffectedEntity. +* api-change:``iot``: General Availability (GA) release of AWS IoT Device Management - Commands, to trigger light-weight remote actions on targeted devices +* api-change:``iotfleetwise``: AWS IoT FleetWise now includes campaign parameters to store and forward data, configure MQTT topic as a data destination, and collect diagnostic trouble code data. It includes APIs for network agnostic data collection using custom decoding interfaces, and monitoring the last known state of vehicles. +* api-change:``iot-jobs-data``: General Availability (GA) release of AWS IoT Device Management - Commands, to trigger light-weight remote actions on targeted devices +* api-change:``lambda``: Adds support for metrics for event source mappings for AWS Lambda +* api-change:``logs``: Adds "Create field indexes to improve query performance and reduce scan volume" and "Transform logs during ingestion". Updates documentation for "PutLogEvents with Entity". +* api-change:``notifications``: This release adds support for AWS User Notifications. You can now configure and view notifications from AWS services in a central location using the AWS SDK. +* api-change:``notificationscontacts``: This release adds support for AWS User Notifications Contacts. You can now configure and view email contacts for AWS User Notifications using the AWS SDK. +* api-change:``resiliencehub``: AWS Resilience Hub's new summary view visually represents applications' resilience through charts, enabling efficient resilience management. It provides a consolidated view of the app portfolio's resilience state and allows data export for custom stakeholder reporting. +* api-change:``s3``: Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API. +* api-change:``ssm``: Added support for providing high-level overviews of managed nodes and previewing the potential impact of a runbook execution. +* api-change:``ssm-quicksetup``: Add methods that retrieve details about deployed configurations: ListConfigurations, GetConfiguration +* api-change:``xray``: AWS X-Ray introduces Transaction Search APIs, enabling span ingestion into CloudWatch Logs for high-scale trace data indexing. 
These APIs support span-level queries, trace graph generation, and metric correlation for deeper application insights. + + 1.35.66 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index ce7ae0bd71..5ee0a5e5a7 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.66' +__version__ = '1.35.67' class NullHandler(logging.Handler): diff --git a/botocore/data/apigateway/2015-07-09/service-2.json b/botocore/data/apigateway/2015-07-09/service-2.json index 5d932aa169..2d551032fd 100644 --- a/botocore/data/apigateway/2015-07-09/service-2.json +++ b/botocore/data/apigateway/2015-07-09/service-2.json @@ -145,6 +145,24 @@ ], "documentation":"
Creates a new domain name.
" }, + "CreateDomainNameAccessAssociation":{ + "name":"CreateDomainNameAccessAssociation", + "http":{ + "method":"POST", + "requestUri":"/domainnameaccessassociations", + "responseCode":201 + }, + "input":{"shape":"CreateDomainNameAccessAssociationRequest"}, + "output":{"shape":"DomainNameAccessAssociation"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Creates a domain name access association resource between an access association source and a private custom domain name.
" + }, "CreateModel":{ "name":"CreateModel", "http":{ @@ -432,6 +450,23 @@ ], "documentation":"Deletes the DomainName resource.
" }, + "DeleteDomainNameAccessAssociation":{ + "name":"DeleteDomainNameAccessAssociation", + "http":{ + "method":"DELETE", + "requestUri":"/domainnameaccessassociations/{domain_name_access_association_arn}", + "responseCode":202 + }, + "input":{"shape":"DeleteDomainNameAccessAssociationRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Deletes the DomainNameAccessAssociation resource.
Only the AWS account that created the DomainNameAccessAssociation resource can delete it. To stop an access association source in another AWS account from accessing your private custom domain name, use the RejectDomainNameAccessAssociation operation.
" + }, "DeleteGatewayResponse":{ "name":"DeleteGatewayResponse", "http":{ @@ -964,6 +999,22 @@ ], "documentation":"Represents a domain name that is contained in a simpler, more intuitive URL that can be called.
" }, + "GetDomainNameAccessAssociations":{ + "name":"GetDomainNameAccessAssociations", + "http":{ + "method":"GET", + "requestUri":"/domainnameaccessassociations" + }, + "input":{"shape":"GetDomainNameAccessAssociationsRequest"}, + "output":{"shape":"DomainNameAccessAssociations"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Represents a collection on DomainNameAccessAssociations resources.
" + }, "GetDomainNames":{ "name":"GetDomainNames", "http":{ @@ -1621,6 +1672,23 @@ ], "documentation":"A feature of the API Gateway control service for updating an existing API with an input of external API definitions. The update can take the form of merging the supplied definition into the existing API or overwriting the existing API.
" }, + "RejectDomainNameAccessAssociation":{ + "name":"RejectDomainNameAccessAssociation", + "http":{ + "method":"POST", + "requestUri":"/rejectdomainnameaccessassociations", + "responseCode":202 + }, + "input":{"shape":"RejectDomainNameAccessAssociationRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Rejects a domain name access association with a private custom domain name.
To reject a domain name access association with an access association source in another AWS account, use this operation. To remove a domain name access association with an access association source in your own account, use the DeleteDomainNameAccessAssociation operation.
" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -2087,6 +2155,10 @@ } }, "shapes":{ + "AccessAssociationSourceType":{ + "type":"string", + "enum":["VPCE"] + }, "AccessLogSettings":{ "type":"structure", "members":{ @@ -2552,6 +2624,12 @@ "location":"uri", "locationName":"domain_name" }, + "domainNameId":{ + "shape":"String", + "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" + }, "basePath":{ "shape":"String", "documentation":"The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Specify '(none)' if you do not want callers to specify a base path name after the domain name.
" @@ -2665,6 +2743,32 @@ }, "documentation":"Creates a new documentation version of a given API.
" }, + "CreateDomainNameAccessAssociationRequest":{ + "type":"structure", + "required":[ + "domainNameArn", + "accessAssociationSourceType", + "accessAssociationSource" + ], + "members":{ + "domainNameArn":{ + "shape":"String", + "documentation":"The ARN of the domain name.
" + }, + "accessAssociationSourceType":{ + "shape":"AccessAssociationSourceType", + "documentation":"The type of the domain name access association source.
" + }, + "accessAssociationSource":{ + "shape":"String", + "documentation":"The identifier of the domain name access association source. For a VPCE, the value is the VPC endpoint ID.
" + }, + "tags":{ + "shape":"MapOfStringToString", + "documentation":"The key-value map of strings. The valid character set is [a-zA-Z+-=._:/]. The tag key can be up to 128 characters and must not start with aws:
. The tag value can be up to 256 characters.
The user-friendly name of the certificate that will be used by edge-optimized endpoint for this domain name.
" + "documentation":"The user-friendly name of the certificate that will be used by edge-optimized endpoint or private endpoint for this domain name.
" }, "certificateBody":{ "shape":"String", - "documentation":"[Deprecated] The body of the server certificate that will be used by edge-optimized endpoint for this domain name provided by your certificate authority.
" + "documentation":"[Deprecated] The body of the server certificate that will be used by edge-optimized endpoint or private endpoint for this domain name provided by your certificate authority.
" }, "certificatePrivateKey":{ "shape":"String", @@ -2691,7 +2795,7 @@ }, "certificateArn":{ "shape":"String", - "documentation":"The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint for this domain name. Certificate Manager is the only supported source.
" + "documentation":"The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint or private endpoint for this domain name. Certificate Manager is the only supported source.
" }, "regionalCertificateName":{ "shape":"String", @@ -2717,6 +2821,10 @@ "ownershipVerificationCertificateArn":{ "shape":"String", "documentation":"The ARN of the public certificate issued by ACM to validate ownership of your custom domain. Only required when configuring mutual TLS and using an ACM imported or private CA certificate ARN as the regionalCertificateArn.
" + }, + "policy":{ + "shape":"String", + "documentation":"A stringified JSON policy document that applies to the execute-api
service for this DomainName regardless of the caller and Method configuration. Supported only for private custom domain names.
A request to create a new domain name.
" @@ -3044,6 +3152,12 @@ "location":"uri", "locationName":"domain_name" }, + "domainNameId":{ + "shape":"String", + "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" + }, "basePath":{ "shape":"String", "documentation":"The base path name of the BasePathMapping resource to delete.
To specify an empty base path, set this parameter to '(none)'
.
Deletes an existing documentation version of an API.
" }, + "DeleteDomainNameAccessAssociationRequest":{ + "type":"structure", + "required":["domainNameAccessAssociationArn"], + "members":{ + "domainNameAccessAssociationArn":{ + "shape":"String", + "documentation":"The ARN of the domain name access association resource.
", + "location":"uri", + "locationName":"domain_name_access_association_arn" + } + } + }, "DeleteDomainNameRequest":{ "type":"structure", "required":["domainName"], @@ -3141,6 +3267,12 @@ "documentation":"The name of the DomainName resource to be deleted.
", "location":"uri", "locationName":"domain_name" + }, + "domainNameId":{ + "shape":"String", + "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" } }, "documentation":"A request to delete the DomainName resource.
" @@ -3627,17 +3759,25 @@ "shape":"String", "documentation":"The custom domain name as an API host name, for example, my-api.example.com
.
The identifier for the domain name resource. Supported only for private custom domain names.
" + }, + "domainNameArn":{ + "shape":"String", + "documentation":"The ARN of the domain name. Supported only for private custom domain names.
" + }, "certificateName":{ "shape":"String", - "documentation":"The name of the certificate that will be used by edge-optimized endpoint for this domain name.
" + "documentation":"The name of the certificate that will be used by edge-optimized endpoint or private endpoint for this domain name.
" }, "certificateArn":{ "shape":"String", - "documentation":"The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint for this domain name. Certificate Manager is the only supported source.
" + "documentation":"The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint or private endpoint for this domain name. Certificate Manager is the only supported source.
" }, "certificateUploadDate":{ "shape":"Timestamp", - "documentation":"The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate.
" + "documentation":"The timestamp when the certificate that was used by edge-optimized endpoint or private endpoint for this domain name was uploaded.
" }, "regionalDomainName":{ "shape":"String", @@ -3690,10 +3830,55 @@ "ownershipVerificationCertificateArn":{ "shape":"String", "documentation":"The ARN of the public certificate issued by ACM to validate ownership of your custom domain. Only required when configuring mutual TLS and using an ACM imported or private CA certificate ARN as the regionalCertificateArn.
" + }, + "managementPolicy":{ + "shape":"String", + "documentation":"A stringified JSON policy document that applies to the API Gateway Management service for this DomainName. This policy document controls access for access association sources to create domain name access associations with this DomainName. Supported only for private custom domain names.
" + }, + "policy":{ + "shape":"String", + "documentation":"A stringified JSON policy document that applies to the execute-api
service for this DomainName regardless of the caller and Method configuration. Supported only for private custom domain names.
Represents a custom domain name as a user-friendly host name of an API (RestApi).
" }, + "DomainNameAccessAssociation":{ + "type":"structure", + "members":{ + "domainNameAccessAssociationArn":{ + "shape":"String", + "documentation":"The ARN of the domain name access association resource.
" + }, + "domainNameArn":{ + "shape":"String", + "documentation":"The ARN of the domain name.
" + }, + "accessAssociationSourceType":{ + "shape":"AccessAssociationSourceType", + "documentation":"The type of the domain name access association source.
" + }, + "accessAssociationSource":{ + "shape":"String", + "documentation":"The ARN of the domain name access association source. For a VPCE, the ARN must be a VPC endpoint.
" + }, + "tags":{ + "shape":"MapOfStringToString", + "documentation":"The collection of tags. Each tag element is associated with a given resource.
" + } + }, + "documentation":"Represents a domain name access association between an access association source and a private custom domain name. With a domain name access association, an access association source can invoke a private custom domain name while isolated from the public internet.
" + }, + "DomainNameAccessAssociations":{ + "type":"structure", + "members":{ + "position":{"shape":"String"}, + "items":{ + "shape":"ListOfDomainNameAccessAssociation", + "documentation":"The current page of elements from this collection.
", + "locationName":"item" + } + } + }, "DomainNameStatus":{ "type":"string", "enum":[ @@ -4006,6 +4191,12 @@ "location":"uri", "locationName":"domain_name" }, + "domainNameId":{ + "shape":"String", + "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" + }, "basePath":{ "shape":"String", "documentation":"The base path name that callers of the API must provide as part of the URL after the domain name. This value must be unique for all of the mappings across a single API. Specify '(none)' if you do not want callers to specify any base path name after the domain name.
", @@ -4025,6 +4216,12 @@ "location":"uri", "locationName":"domain_name" }, + "domainNameId":{ + "shape":"String", + "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" + }, "position":{ "shape":"String", "documentation":"The current pagination position in the paged result set.
", @@ -4242,6 +4439,29 @@ }, "documentation":"Gets the documentation versions of an API.
" }, + "GetDomainNameAccessAssociationsRequest":{ + "type":"structure", + "members":{ + "position":{ + "shape":"String", + "documentation":"The current pagination position in the paged result set.
", + "location":"querystring", + "locationName":"position" + }, + "limit":{ + "shape":"NullableInteger", + "documentation":"The maximum number of returned results per page. The default value is 25 and the maximum value is 500.
", + "location":"querystring", + "locationName":"limit" + }, + "resourceOwner":{ + "shape":"ResourceOwner", + "documentation":" The owner of the domain name access association. Use SELF
to only list the domain name access associations owned by your own account. Use OTHER_ACCOUNTS
to list the domain name access associations with your private custom domain names that are owned by other AWS accounts.
The name of the DomainName resource.
", "location":"uri", "locationName":"domain_name" + }, + "domainNameId":{ + "shape":"String", + "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" } }, "documentation":"Request to get the name of a DomainName resource.
" @@ -4269,6 +4495,12 @@ "documentation":"The maximum number of returned results per page. The default value is 25 and the maximum value is 500.
", "location":"querystring", "locationName":"limit" + }, + "resourceOwner":{ + "shape":"ResourceOwner", + "documentation":"The owner of the domain name access association.
", + "location":"querystring", + "locationName":"resourceOwner" } }, "documentation":"Request to describe a collection of DomainName resources.
" @@ -5242,6 +5474,10 @@ "type":"list", "member":{"shape":"DomainName"} }, + "ListOfDomainNameAccessAssociation":{ + "type":"list", + "member":{"shape":"DomainNameAccessAssociation"} + }, "ListOfEndpointType":{ "type":"list", "member":{"shape":"EndpointType"} @@ -5964,6 +6200,27 @@ }, "documentation":"Quotas configured for a usage plan.
" }, + "RejectDomainNameAccessAssociationRequest":{ + "type":"structure", + "required":[ + "domainNameAccessAssociationArn", + "domainNameArn" + ], + "members":{ + "domainNameAccessAssociationArn":{ + "shape":"String", + "documentation":"The ARN of the domain name access association resource.
", + "location":"querystring", + "locationName":"domainNameAccessAssociationArn" + }, + "domainNameArn":{ + "shape":"String", + "documentation":"The ARN of the domain name.
", + "location":"querystring", + "locationName":"domainNameArn" + } + } + }, "RequestValidator":{ "type":"structure", "members":{ @@ -6024,6 +6281,13 @@ }, "documentation":"Represents an API resource.
" }, + "ResourceOwner":{ + "type":"string", + "enum":[ + "SELF", + "OTHER_ACCOUNTS" + ] + }, "Resources":{ "type":"structure", "members":{ @@ -6666,6 +6930,12 @@ "location":"uri", "locationName":"domain_name" }, + "domainNameId":{ + "shape":"String", + "documentation":"The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" + }, "basePath":{ "shape":"String", "documentation":"The base path of the BasePathMapping resource to change.
To specify an empty base path, set this parameter to '(none)'
.
The identifier for the domain name resource. Supported only for private custom domain names.
", + "location":"querystring", + "locationName":"domainNameId" + }, "patchOperations":{ "shape":"ListOfPatchOperation", "documentation":"For more information about supported patch operations, see Patch Operations.
" diff --git a/botocore/data/application-autoscaling/2016-02-06/service-2.json b/botocore/data/application-autoscaling/2016-02-06/service-2.json index 3c0ed37529..c62bb4b033 100644 --- a/botocore/data/application-autoscaling/2016-02-06/service-2.json +++ b/botocore/data/application-autoscaling/2016-02-06/service-2.json @@ -128,6 +128,20 @@ ], "documentation":"Describes the Application Auto Scaling scheduled actions for the specified service namespace.
You can filter the results using the ResourceId
, ScalableDimension
, and ScheduledActionNames
parameters.
For more information, see Scheduled scaling in the Application Auto Scaling User Guide.
" }, + "GetPredictiveScalingForecast":{ + "name":"GetPredictiveScalingForecast", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPredictiveScalingForecastRequest"}, + "output":{"shape":"GetPredictiveScalingForecastResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"Retrieves the forecast data for a predictive scaling policy.
Load forecasts are predictions of the hourly load values using historical load data from CloudWatch and an analysis of historical trends. Capacity forecasts are represented as predicted values for the minimum capacity that is needed on an hourly basis, based on the hourly load forecast.
A minimum of 24 hours of data is required to create the initial forecasts. However, having a full 14 days of historical data results in more accurate forecasts.
" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -259,6 +273,24 @@ "min":1, "pattern":"^arn:.+:application-autoscaling:.+:[0-9]+:scalable-target\\/[a-zA-Z0-9-]+$" }, + "CapacityForecast":{ + "type":"structure", + "required":[ + "Timestamps", + "Values" + ], + "members":{ + "Timestamps":{ + "shape":"PredictiveScalingForecastTimestamps", + "documentation":"The timestamps for the data points, in UTC format.
" + }, + "Values":{ + "shape":"PredictiveScalingForecastValues", + "documentation":"The values of the data points.
" + } + }, + "documentation":" A GetPredictiveScalingForecast
call returns the capacity forecast for a predictive scaling policy. This structure includes the data points for that capacity forecast, along with the timestamps of those data points.
Failed access to resources caused an exception. This exception is thrown when Application Auto Scaling is unable to retrieve the alarms associated with a scaling policy due to a client error, for example, if the role ARN specified for a scalable target does not have permission to call the CloudWatch DescribeAlarms on your behalf.
", "exception":true }, + "GetPredictiveScalingForecastRequest":{ + "type":"structure", + "required":[ + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "PolicyName", + "StartTime", + "EndTime" + ], + "members":{ + "ServiceNamespace":{ + "shape":"ServiceNamespace", + "documentation":" The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource
instead.
The identifier of the resource.
" + }, + "ScalableDimension":{ + "shape":"ScalableDimension", + "documentation":"The scalable dimension.
" + }, + "PolicyName":{ + "shape":"PolicyName", + "documentation":"The name of the policy.
" + }, + "StartTime":{ + "shape":"TimestampType", + "documentation":"The inclusive start time of the time range for the forecast data to get. At most, the date and time can be one year before the current date and time
" + }, + "EndTime":{ + "shape":"TimestampType", + "documentation":"The exclusive end time of the time range for the forecast data to get. The maximum time duration between the start and end time is 30 days.
" + } + } + }, + "GetPredictiveScalingForecastResponse":{ + "type":"structure", + "members":{ + "LoadForecast":{ + "shape":"LoadForecasts", + "documentation":"The load forecast.
" + }, + "CapacityForecast":{ + "shape":"CapacityForecast", + "documentation":"The capacity forecast.
" + }, + "UpdateTime":{ + "shape":"TimestampType", + "documentation":"The time the forecast was made.
" + } + } + }, "Id":{ "type":"string", "max":255, @@ -624,6 +710,33 @@ } } }, + "LoadForecast":{ + "type":"structure", + "required":[ + "Timestamps", + "Values", + "MetricSpecification" + ], + "members":{ + "Timestamps":{ + "shape":"PredictiveScalingForecastTimestamps", + "documentation":"The timestamps for the data points, in UTC format.
" + }, + "Values":{ + "shape":"PredictiveScalingForecastValues", + "documentation":"The values of the data points.
" + }, + "MetricSpecification":{ + "shape":"PredictiveScalingMetricSpecification", + "documentation":"The metric specification for the load forecast.
" + } + }, + "documentation":" A GetPredictiveScalingForecast
call returns the load forecast for a predictive scaling policy. This structure includes the data points for that load forecast, along with the timestamps of those data points and the metric specification.
Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget
and there is a target group attached to the Spot Fleet or ECS service.
You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). The format of the resource label is:
app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff
.
Where:
app/<load-balancer-name>/<load-balancer-id> is the final portion of the load balancer ARN
targetgroup/<target-group-name>/<target-group-id> is the final portion of the target group ARN.
To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers API operation. To find the ARN for the target group, use the DescribeTargetGroups API operation.
" } }, - "documentation":"Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.
For more information, Predefined metrics for target tracking scaling policies in the Application Auto Scaling User Guide.
" + "documentation":"Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling.
For more information, see Predefined metrics for target tracking scaling policies in the Application Auto Scaling User Guide.
" + }, + "PredictiveScalingCustomizedMetricSpecification":{ + "type":"structure", + "required":["MetricDataQueries"], + "members":{ + "MetricDataQueries":{ + "shape":"PredictiveScalingMetricDataQueries", + "documentation":"One or more metric data queries to provide data points for a metric specification.
" + } + }, + "documentation":"Represents a CloudWatch metric of your choosing for a predictive scaling policy.
" + }, + "PredictiveScalingForecastTimestamps":{ + "type":"list", + "member":{"shape":"TimestampType"} + }, + "PredictiveScalingForecastValues":{ + "type":"list", + "member":{"shape":"MetricScale"} + }, + "PredictiveScalingMaxCapacityBreachBehavior":{ + "type":"string", + "enum":[ + "HonorMaxCapacity", + "IncreaseMaxCapacity" + ] + }, + "PredictiveScalingMaxCapacityBuffer":{ + "type":"integer", + "max":100, + "min":0 + }, + "PredictiveScalingMetric":{ + "type":"structure", + "members":{ + "Dimensions":{ + "shape":"PredictiveScalingMetricDimensions", + "documentation":"Describes the dimensions of the metric.
" + }, + "MetricName":{ + "shape":"PredictiveScalingMetricName", + "documentation":"The name of the metric.
" + }, + "Namespace":{ + "shape":"PredictiveScalingMetricNamespace", + "documentation":"The namespace of the metric.
" + } + }, + "documentation":"Describes the scaling metric.
" + }, + "PredictiveScalingMetricDataQueries":{ + "type":"list", + "member":{"shape":"PredictiveScalingMetricDataQuery"} + }, + "PredictiveScalingMetricDataQuery":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"Id", + "documentation":" A short name that identifies the object's results in the response. This name must be unique among all MetricDataQuery
objects specified for a single scaling policy. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscores. The first character must be a lowercase letter.
The math expression to perform on the returned data, if this object is performing a math expression. This expression can use the Id
of the other metrics to refer to those metrics, and can also use the Id
of other expressions to use the result of those expressions.
Conditional: Within each MetricDataQuery
object, you must specify either Expression
or MetricStat
, but not both.
Information about the metric data to return.
Conditional: Within each MetricDataQuery
object, you must specify either Expression
or MetricStat
, but not both.
A human-readable label for this metric or expression. This is especially useful if this is a math expression, so that you know what the value represents.
" + }, + "ReturnData":{ + "shape":"ReturnData", + "documentation":"Indicates whether to return the timestamps and raw data values of this metric.
If you use any math expressions, specify true
for this value for only the final math expression that the metric specification is based on. You must specify false
for ReturnData
for all the other metrics and expressions used in the metric specification.
If you are only retrieving metrics and not performing any math expressions, do not specify anything for ReturnData
. This sets it to its default (true
).
The metric data to return. Also defines whether this call is returning data for one metric only, or whether it is performing a math expression on the values of returned metric statistics to create a new time series. A time series is a series of data points, each of which is associated with a timestamp.
" + }, + "PredictiveScalingMetricDimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"PredictiveScalingMetricDimensionName", + "documentation":"The name of the dimension.
" + }, + "Value":{ + "shape":"PredictiveScalingMetricDimensionValue", + "documentation":"The value of the dimension.
" + } + }, + "documentation":"Describes the dimension of a metric.
" + }, + "PredictiveScalingMetricDimensionName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "PredictiveScalingMetricDimensionValue":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "PredictiveScalingMetricDimensions":{ + "type":"list", + "member":{"shape":"PredictiveScalingMetricDimension"} + }, + "PredictiveScalingMetricName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "PredictiveScalingMetricNamespace":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "PredictiveScalingMetricSpecification":{ + "type":"structure", + "required":["TargetValue"], + "members":{ + "TargetValue":{ + "shape":"MetricScale", + "documentation":"Specifies the target utilization.
" + }, + "PredefinedMetricPairSpecification":{ + "shape":"PredictiveScalingPredefinedMetricPairSpecification", + "documentation":"The predefined metric pair specification that determines the appropriate scaling metric and load metric to use.
" + }, + "PredefinedScalingMetricSpecification":{ + "shape":"PredictiveScalingPredefinedScalingMetricSpecification", + "documentation":"The predefined scaling metric specification.
" + }, + "PredefinedLoadMetricSpecification":{ + "shape":"PredictiveScalingPredefinedLoadMetricSpecification", + "documentation":"The predefined load metric specification.
" + }, + "CustomizedScalingMetricSpecification":{ + "shape":"PredictiveScalingCustomizedMetricSpecification", + "documentation":"The customized scaling metric specification.
" + }, + "CustomizedLoadMetricSpecification":{ + "shape":"PredictiveScalingCustomizedMetricSpecification", + "documentation":"The customized load metric specification.
" + }, + "CustomizedCapacityMetricSpecification":{ + "shape":"PredictiveScalingCustomizedMetricSpecification", + "documentation":"The customized capacity metric specification.
" + } + }, + "documentation":"This structure specifies the metrics and target utilization settings for a predictive scaling policy.
You must specify either a metric pair, or a load metric and a scaling metric individually. Specifying a metric pair instead of individual metrics provides a simpler way to configure metrics for a scaling policy. You choose the metric pair, and the policy automatically knows the correct sum and average statistics to use for the load metric and the scaling metric.
" + }, + "PredictiveScalingMetricSpecifications":{ + "type":"list", + "member":{"shape":"PredictiveScalingMetricSpecification"} + }, + "PredictiveScalingMetricStat":{ + "type":"structure", + "required":[ + "Metric", + "Stat" + ], + "members":{ + "Metric":{ + "shape":"PredictiveScalingMetric", + "documentation":"The CloudWatch metric to return, including the metric name, namespace, and dimensions. To get the exact metric name, namespace, and dimensions, inspect the Metric object that is returned by a call to ListMetrics.
" + }, + "Stat":{ + "shape":"XmlString", + "documentation":"The statistic to return. It can include any CloudWatch statistic or extended statistic. For a list of valid values, see the table in Statistics in the Amazon CloudWatch User Guide.
The most commonly used metrics for predictive scaling are Average
and Sum
.
The unit to use for the returned data points. For a complete list of the units that CloudWatch supports, see the MetricDatum data type in the Amazon CloudWatch API Reference.
" + } + }, + "documentation":"This structure defines the CloudWatch metric to return, along with the statistic and unit.
" + }, + "PredictiveScalingMetricType":{ + "type":"string", + "max":128, + "min":1 + }, + "PredictiveScalingMetricUnit":{ + "type":"string", + "max":1023, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "PredictiveScalingMode":{ + "type":"string", + "enum":[ + "ForecastOnly", + "ForecastAndScale" + ] + }, + "PredictiveScalingPolicyConfiguration":{ + "type":"structure", + "required":["MetricSpecifications"], + "members":{ + "MetricSpecifications":{ + "shape":"PredictiveScalingMetricSpecifications", + "documentation":"This structure includes the metrics and target utilization to use for predictive scaling.
This is an array, but we currently only support a single metric specification. That is, you can specify a target value and a single metric pair, or a target value and one scaling metric and one load metric.
" + }, + "Mode":{ + "shape":"PredictiveScalingMode", + "documentation":" The predictive scaling mode. Defaults to ForecastOnly
if not specified.
The amount of time, in seconds, that the start time can be advanced.
The value must be less than the forecast interval duration of 3600 seconds (60 minutes). Defaults to 300 seconds if not specified.
" + }, + "MaxCapacityBreachBehavior":{ + "shape":"PredictiveScalingMaxCapacityBreachBehavior", + "documentation":" Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity. Defaults to HonorMaxCapacity
if not specified.
The size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. For example, if the buffer is 10, this means a 10 percent buffer, such that if the forecast capacity is 50, and the maximum capacity is 40, then the effective maximum capacity is 55.
Required if the MaxCapacityBreachBehavior
property is set to IncreaseMaxCapacity
, and cannot be used otherwise.
Represents a predictive scaling policy configuration.
" + }, + "PredictiveScalingPredefinedLoadMetricSpecification":{ + "type":"structure", + "required":["PredefinedMetricType"], + "members":{ + "PredefinedMetricType":{ + "shape":"PredictiveScalingMetricType", + "documentation":"The metric type.
" + }, + "ResourceLabel":{ + "shape":"ResourceLabel", + "documentation":"A label that uniquely identifies a target group.
" + } + }, + "documentation":"Describes a load metric for a predictive scaling policy.
When returned in the output of DescribePolicies
, it indicates that a predictive scaling policy uses individually specified load and scaling metrics instead of a metric pair.
Indicates which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric.
" + }, + "ResourceLabel":{ + "shape":"ResourceLabel", + "documentation":"A label that uniquely identifies a specific target group from which to determine the total and average request count.
" + } + }, + "documentation":"Represents a metric pair for a predictive scaling policy.
" + }, + "PredictiveScalingPredefinedScalingMetricSpecification":{ + "type":"structure", + "required":["PredefinedMetricType"], + "members":{ + "PredefinedMetricType":{ + "shape":"PredictiveScalingMetricType", + "documentation":"The metric type.
" + }, + "ResourceLabel":{ + "shape":"ResourceLabel", + "documentation":"A label that uniquely identifies a specific target group from which to determine the average request count.
" + } + }, + "documentation":"Describes a scaling metric for a predictive scaling policy.
When returned in the output of DescribePolicies
, it indicates that a predictive scaling policy uses individually specified load and scaling metrics instead of a metric pair.
A target tracking scaling policy. Includes support for predefined or customized metrics.
This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling
.
The configuration of the predictive scaling policy.
" } } }, @@ -1017,6 +1417,10 @@ "shape":"ResourceCapacity", "documentation":"The maximum value to scale to in response to a scale-out activity.
" }, + "PredictedCapacity":{ + "shape":"ResourceCapacity", + "documentation":"The predicted capacity of the scalable target.
" + }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", "documentation":"The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.
" @@ -1182,6 +1586,10 @@ "shape":"TargetTrackingScalingPolicyConfiguration", "documentation":"A target tracking scaling policy.
" }, + "PredictiveScalingPolicyConfiguration":{ + "shape":"PredictiveScalingPolicyConfiguration", + "documentation":"The predictive scaling policy configuration.
" + }, "Alarms":{ "shape":"Alarms", "documentation":"The CloudWatch alarms associated with the scaling policy.
" diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index 4ecd0e692a..7ecb40d417 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -2649,7 +2649,8 @@ "HTTP", "RELATIONAL_DATABASE", "AMAZON_OPENSEARCH_SERVICE", - "AMAZON_EVENTBRIDGE" + "AMAZON_EVENTBRIDGE", + "AMAZON_BEDROCK_RUNTIME" ] }, "DataSources":{ diff --git a/botocore/data/ce/2017-10-25/service-2.json b/botocore/data/ce/2017-10-25/service-2.json index a58a550e30..e5447b9aa3 100644 --- a/botocore/data/ce/2017-10-25/service-2.json +++ b/botocore/data/ce/2017-10-25/service-2.json @@ -171,6 +171,21 @@ ], "documentation":"Retrieves estimated usage records for hourly granularity or resource-level data at daily granularity.
" }, + "GetCommitmentPurchaseAnalysis":{ + "name":"GetCommitmentPurchaseAnalysis", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCommitmentPurchaseAnalysisRequest"}, + "output":{"shape":"GetCommitmentPurchaseAnalysisResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AnalysisNotFoundException"}, + {"shape":"DataUnavailableException"} + ], + "documentation":"Retrieves a commitment purchase analysis result based on the AnalysisId
.
Retrieves a forecast for how much Amazon Web Services predicts that you will use over the forecast time period that you select, based on your past usage.
" }, + "ListCommitmentPurchaseAnalyses":{ + "name":"ListCommitmentPurchaseAnalyses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCommitmentPurchaseAnalysesRequest"}, + "output":{"shape":"ListCommitmentPurchaseAnalysesResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"DataUnavailableException"} + ], + "documentation":"Lists the commitment purchase analyses for your account based on the last 30 days.
" + }, "ListCostAllocationTagBackfillHistory":{ "name":"ListCostAllocationTagBackfillHistory", "http":{ @@ -499,6 +529,22 @@ ], "documentation":"Modifies the feedback property of a given cost anomaly.
" }, + "StartCommitmentPurchaseAnalysis":{ + "name":"StartCommitmentPurchaseAnalysis", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartCommitmentPurchaseAnalysisRequest"}, + "output":{"shape":"StartCommitmentPurchaseAnalysisResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"DataUnavailableException"}, + {"shape":"GenerationExistsException"} + ], + "documentation":"Specifies the parameters of a planned commitment purchase and starts the generation of the analysis. This enables you to estimate the cost, coverage, and utilization impact of your planned commitment purchases. You can request up to 20 analysis runs per day.
" + }, "StartCostAllocationTagBackfill":{ "name":"StartCostAllocationTagBackfill", "http":{ @@ -617,6 +663,12 @@ } }, "shapes":{ + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]{12}" + }, "AccountScope":{ "type":"string", "enum":[ @@ -626,6 +678,89 @@ }, "AmortizedRecurringFee":{"type":"string"}, "AmortizedUpfrontFee":{"type":"string"}, + "AnalysisDetails":{ + "type":"structure", + "members":{ + "SavingsPlansPurchaseAnalysisDetails":{ + "shape":"SavingsPlansPurchaseAnalysisDetails", + "documentation":"Details about the Savings Plans purchase analysis.
" + } + }, + "documentation":"Details about the analysis.
" + }, + "AnalysisId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[\\S\\s]{8}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{12}$" + }, + "AnalysisIds":{ + "type":"list", + "member":{"shape":"AnalysisId"}, + "max":600, + "min":0 + }, + "AnalysisNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"The requested analysis can't be found.
", + "exception":true + }, + "AnalysisStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "PROCESSING", + "FAILED" + ] + }, + "AnalysisSummary":{ + "type":"structure", + "members":{ + "EstimatedCompletionTime":{ + "shape":"ZonedDateTime", + "documentation":"The estimated time for when the analysis will complete.
" + }, + "AnalysisCompletionTime":{ + "shape":"ZonedDateTime", + "documentation":"The completion time of the analysis.
" + }, + "AnalysisStartedTime":{ + "shape":"ZonedDateTime", + "documentation":"The start time of the analysis.
" + }, + "AnalysisStatus":{ + "shape":"AnalysisStatus", + "documentation":"The status of the analysis.
" + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"The error code used for the analysis.
" + }, + "AnalysisId":{ + "shape":"AnalysisId", + "documentation":"The analysis ID that's associated with the commitment purchase.
" + }, + "CommitmentPurchaseAnalysisConfiguration":{ + "shape":"CommitmentPurchaseAnalysisConfiguration", + "documentation":"The analysis configuration for the commitment purchase analysis.
" + } + }, + "documentation":"A summary of the analysis.
" + }, + "AnalysisSummaryList":{ + "type":"list", + "member":{"shape":"AnalysisSummary"} + }, + "AnalysisType":{ + "type":"string", + "enum":[ + "MAX_SAVINGS", + "CUSTOM_COMMITMENT" + ] + }, "Anomalies":{ "type":"list", "member":{"shape":"Anomaly"} @@ -653,7 +788,7 @@ }, "DimensionValue":{ "shape":"GenericString", - "documentation":"The dimension for the anomaly (for example, an Amazon Web Servicesservice in a service monitor).
" + "documentation":"The dimension for the anomaly (for example, an Amazon Web Services service in a service monitor).
" }, "RootCauses":{ "shape":"RootCauses", @@ -865,6 +1000,16 @@ "documentation":"The requested report expired. Update the date interval and try again.
", "exception":true }, + "CommitmentPurchaseAnalysisConfiguration":{ + "type":"structure", + "members":{ + "SavingsPlansPurchaseAnalysisConfiguration":{ + "shape":"SavingsPlansPurchaseAnalysisConfiguration", + "documentation":"The configuration for the Savings Plans purchase analysis.
" + } + }, + "documentation":"The configuration for the commitment purchase analysis.
" + }, "Context":{ "type":"string", "enum":[ @@ -1136,7 +1281,7 @@ "Value":{"shape":"CostCategoryValue"}, "Rule":{ "shape":"Expression", - "documentation":"An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT
, SERVICE_CODE
, RECORD_TYPE
, LINKED_ACCOUNT_NAME
, REGION
, and USAGE_TYPE
.
RECORD_TYPE
is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.
An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT
, SERVICE_CODE
, RECORD_TYPE
, LINKED_ACCOUNT_NAME
, REGION
, USAGE_TYPE
, and BILLING_ENTITY
.
RECORD_TYPE
is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.
Details about the Amazon ElastiCache reservations that Amazon Web Services recommends that you purchase.
" }, "Entity":{"type":"string"}, + "ErrorCode":{ + "type":"string", + "enum":[ + "NO_USAGE_FOUND", + "INTERNAL_FAILURE", + "INVALID_SAVINGS_PLANS_TO_ADD", + "INVALID_SAVINGS_PLANS_TO_EXCLUDE", + "INVALID_ACCOUNT_ID" + ] + }, "ErrorMessage":{"type":"string"}, "Estimated":{"type":"boolean"}, "Expression":{ @@ -2219,6 +2374,60 @@ } } }, + "GetCommitmentPurchaseAnalysisRequest":{ + "type":"structure", + "required":["AnalysisId"], + "members":{ + "AnalysisId":{ + "shape":"AnalysisId", + "documentation":"The analysis ID that's associated with the commitment purchase analysis.
" + } + } + }, + "GetCommitmentPurchaseAnalysisResponse":{ + "type":"structure", + "required":[ + "EstimatedCompletionTime", + "AnalysisStartedTime", + "AnalysisId", + "AnalysisStatus", + "CommitmentPurchaseAnalysisConfiguration" + ], + "members":{ + "EstimatedCompletionTime":{ + "shape":"ZonedDateTime", + "documentation":"The estimated time for when the analysis will complete.
" + }, + "AnalysisCompletionTime":{ + "shape":"ZonedDateTime", + "documentation":"The completion time of the analysis.
" + }, + "AnalysisStartedTime":{ + "shape":"ZonedDateTime", + "documentation":"The start time of the analysis.
" + }, + "AnalysisId":{ + "shape":"AnalysisId", + "documentation":"The analysis ID that's associated with the commitment purchase analysis.
" + }, + "AnalysisStatus":{ + "shape":"AnalysisStatus", + "documentation":"The status of the analysis.
" + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"The error code used for the analysis.
" + }, + "AnalysisDetails":{ + "shape":"AnalysisDetails", + "documentation":"Details about the analysis.
" + }, + "CommitmentPurchaseAnalysisConfiguration":{ + "shape":"CommitmentPurchaseAnalysisConfiguration", + "documentation":"The configuration for the commitment purchase analysis.
" + } + } + }, "GetCostAndUsageRequest":{ "type":"structure", "required":[ @@ -2447,7 +2656,7 @@ }, "Context":{ "shape":"Context", - "documentation":"The context for the call to GetDimensionValues
. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation.
If you set the context to COST_AND_USAGE, you can use the following dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a.
BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following:
- Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Servicesservices.
- AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Servicesservices in India.
- Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge.
INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g).
INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice.
LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account.
OPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
OPERATION - The action performed. Examples include RunInstance and CreateBucket.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances.
RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance.
SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute).
SERVICE - The Amazon Web Services service such as Amazon DynamoDB.
TENANCY - The tenancy of a resource. Examples are shared or dedicated.
USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute.
REGION - The Amazon Web Services Region.
RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits.
RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
If you set the context to RESERVATIONS, you can use the following dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
REGION - The Amazon Web Services Region.
SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
TENANCY - The tenancy of a resource. Examples are shared or dedicated.
If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront)
REGION - The Amazon Web Services Region.
INSTANCE_TYPE_FAMILY - The family of instances (For example, m5)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account.
SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.",
+ "documentation":"The context for the call to GetDimensionValues. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation.
If you set the context to COST_AND_USAGE, you can use the following dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a.
BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following:
- Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services services.
- AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Services services in India.
- Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge.
INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g).
INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice.
LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account.
OPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
OPERATION - The action performed. Examples include RunInstance and CreateBucket.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances.
RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance.
SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute).
SERVICE - The Amazon Web Services service such as Amazon DynamoDB.
TENANCY - The tenancy of a resource. Examples are shared or dedicated.
USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute.
REGION - The Amazon Web Services Region.
RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits.
RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
If you set the context to RESERVATIONS, you can use the following dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
REGION - The Amazon Web Services Region.
SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
TENANCY - The tenancy of a resource. Examples are shared or dedicated.
If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront)
REGION - The Amazon Web Services Region.
INSTANCE_TYPE_FAMILY - The family of instances (For example, m5)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account.
SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.",
- "documentation":"Filters utilization data by dimensions. You can filter by the following dimensions:
AZ
CACHE_ENGINE
DEPLOYMENT_OPTION
INSTANCE_TYPE
LINKED_ACCOUNT
OPERATING_SYSTEM
PLATFORM
REGION
SERVICE
SCOPE
TENANCY
GetReservationUtilization uses the same Expression object as the other operations, but only AND is supported among each dimension, and nesting is supported up to only one level deep. If there are multiple values for a dimension, they are OR'd together.",
+ "documentation":"Filters utilization data by dimensions. You can filter by the following dimensions:
AZ
CACHE_ENGINE
DEPLOYMENT_OPTION
INSTANCE_TYPE
LINKED_ACCOUNT
OPERATING_SYSTEM
PLATFORM
REGION
SERVICE
If not specified, the SERVICE filter defaults to Amazon Elastic Compute Cloud - Compute. Supported values for SERVICE are Amazon Elastic Compute Cloud - Compute, Amazon Relational Database Service, Amazon ElastiCache, Amazon Redshift, and Amazon Elasticsearch Service. The value for the SERVICE filter should not exceed \"1\".
SCOPE
TENANCY
GetReservationUtilization uses the same Expression object as the other operations, but only AND is supported among each dimension, and nesting is supported up to only one level deep. If there are multiple values for a dimension, they are OR'd together.",
You made too many calls in a short period of time. Try again later.
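A minimal boto3 sketch of the GetDimensionValues call this documentation describes; the dates are hypothetical placeholders.

import boto3

ce = boto3.client("ce")

# List BILLING_ENTITY dimension values for a hypothetical month.
response = ce.get_dimension_values(
    TimePeriod={"Start": "2024-10-01", "End": "2024-11-01"},
    Dimension="BILLING_ENTITY",
    Context="COST_AND_USAGE",
)
for value in response["DimensionValues"]:
    print(value["Value"])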
", "exception":true }, + "ListCommitmentPurchaseAnalysesRequest":{ + "type":"structure", + "members":{ + "AnalysisStatus":{ + "shape":"AnalysisStatus", + "documentation":"The status of the analysis.
" + }, + "NextPageToken":{ + "shape":"NextPageToken", + "documentation":"The token to retrieve the next set of results.
" + }, + "PageSize":{ + "shape":"NonNegativeInteger", + "documentation":"The number of analyses that you want returned in a single response object.
" + }, + "AnalysisIds":{ + "shape":"AnalysisIds", + "documentation":"The analysis IDs associated with the commitment purchase analyses.
" + } + } + }, + "ListCommitmentPurchaseAnalysesResponse":{ + "type":"structure", + "members":{ + "AnalysisSummaryList":{ + "shape":"AnalysisSummaryList", + "documentation":"The list of analyses.
" + }, + "NextPageToken":{ + "shape":"NextPageToken", + "documentation":"The token to retrieve the next set of results.
" + } + } + }, "ListCostAllocationTagBackfillHistoryRequest":{ "type":"structure", "members":{ @@ -4327,7 +4570,7 @@ "members":{ "Service":{ "shape":"GenericString", - "documentation":"The Amazon Web Servicesservice name that's associated with the cost anomaly.
" + "documentation":"The Amazon Web Services service name that's associated with the cost anomaly.
" }, "Region":{ "shape":"GenericString", @@ -4346,13 +4589,47 @@ "documentation":"The member account name value that's associated with the cost anomaly.
" } }, - "documentation":"The combination of Amazon Web Servicesservice, linked account, linked account name, Region, and usage type where a cost anomaly is observed. The linked account name will only be available when the account name can be identified.
" + "documentation":"The combination of Amazon Web Services service, linked account, linked account name, Region, and usage type where a cost anomaly is observed. The linked account name will only be available when the account name can be identified.
" }, "RootCauses":{ "type":"list", "member":{"shape":"RootCause"} }, "SavingsPlanArn":{"type":"string"}, + "SavingsPlans":{ + "type":"structure", + "members":{ + "PaymentOption":{ + "shape":"PaymentOption", + "documentation":"The payment option for the Savings Plans commitment.
" + }, + "SavingsPlansType":{ + "shape":"SupportedSavingsPlansType", + "documentation":"The Savings Plans type.
" + }, + "Region":{ + "shape":"GenericString", + "documentation":"The Region associated with the Savings Plans commitment.
" + }, + "InstanceFamily":{ + "shape":"GenericString", + "documentation":"The instance family of the Savings Plans commitment.
" + }, + "TermInYears":{ + "shape":"TermInYears", + "documentation":"The term that you want the Savings Plans commitment for.
" + }, + "SavingsPlansCommitment":{ + "shape":"SavingsPlansCommitment", + "documentation":"The Savings Plans commitment.
" + }, + "OfferingId":{ + "shape":"GenericString", + "documentation":"The unique ID that's used to distinguish commitments from one another.
" + } + }, + "documentation":"The Savings Plans commitment details.
" + }, "SavingsPlansAmortizedCommitment":{ "type":"structure", "members":{ @@ -4371,6 +4648,11 @@ }, "documentation":"The amortized amount of Savings Plans purchased in a specific account during a specific time interval.
" }, + "SavingsPlansCommitment":{ + "type":"double", + "max":5000, + "min":0.001 + }, "SavingsPlansCoverage":{ "type":"structure", "members":{ @@ -4443,6 +4725,141 @@ }, "documentation":"The attribute details on a specific Savings Plan.
" }, + "SavingsPlansId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[\\S\\s]{8}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{12}$" + }, + "SavingsPlansPurchaseAnalysisConfiguration":{ + "type":"structure", + "required":[ + "AnalysisType", + "SavingsPlansToAdd", + "LookBackTimePeriod" + ], + "members":{ + "AccountScope":{ + "shape":"AccountScope", + "documentation":"The account scope that you want your analysis for.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The account that the analysis is for.
" + }, + "AnalysisType":{ + "shape":"AnalysisType", + "documentation":"The type of analysis.
" + }, + "SavingsPlansToAdd":{ + "shape":"SavingsPlansToAdd", + "documentation":"Savings Plans to include in the analysis.
" + }, + "SavingsPlansToExclude":{ + "shape":"SavingsPlansToExclude", + "documentation":"Savings Plans to exclude from the analysis.
" + }, + "LookBackTimePeriod":{ + "shape":"DateInterval", + "documentation":"The time period associated with the analysis.
" + } + }, + "documentation":"The configuration for the Savings Plans purchase analysis.
" + }, + "SavingsPlansPurchaseAnalysisDetails":{ + "type":"structure", + "members":{ + "CurrencyCode":{ + "shape":"GenericString", + "documentation":"The currency code used for the analysis.
" + }, + "LookbackPeriodInHours":{ + "shape":"GenericString", + "documentation":"The lookback period in hours that's used to generate the analysis.
" + }, + "CurrentAverageCoverage":{ + "shape":"GenericString", + "documentation":"The average value of hourly coverage over the lookback period.
" + }, + "CurrentAverageHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"The average value of hourly On-Demand spend over the lookback period.
" + }, + "CurrentMaximumHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"The highest value of hourly On-Demand spend over the lookback period.
" + }, + "CurrentMinimumHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"The lowest value of hourly On-Demand spend over the lookback period.
" + }, + "CurrentOnDemandSpend":{ + "shape":"GenericString", + "documentation":"The current total On-Demand spend over the lookback period.
" + }, + "ExistingHourlyCommitment":{ + "shape":"GenericString", + "documentation":"The existing hourly commitment for the Savings Plan type.
" + }, + "HourlyCommitmentToPurchase":{ + "shape":"GenericString", + "documentation":"The recommended or custom hourly commitment.
" + }, + "EstimatedAverageCoverage":{ + "shape":"GenericString", + "documentation":"The estimated coverage of the Savings Plan.
" + }, + "EstimatedAverageUtilization":{ + "shape":"GenericString", + "documentation":"The estimated utilization of the Savings Plan.
" + }, + "EstimatedMonthlySavingsAmount":{ + "shape":"GenericString", + "documentation":"The estimated monthly savings amount based on the Savings Plan.
" + }, + "EstimatedOnDemandCost":{ + "shape":"GenericString", + "documentation":"The remaining On-Demand cost estimated to not be covered by the commitment, over the length of the lookback period.
" + }, + "EstimatedOnDemandCostWithCurrentCommitment":{ + "shape":"GenericString", + "documentation":"The estimated On-Demand cost you expect with no additional commitment, based on your usage of the selected time period and the Savings Plan you own.
" + }, + "EstimatedROI":{ + "shape":"GenericString", + "documentation":"The estimated return on investment that's based on the purchase commitment and estimated savings. This is calculated as estimatedSavingsAmount/estimatedSPCost*100.
" + }, + "EstimatedSavingsAmount":{ + "shape":"GenericString", + "documentation":"The estimated savings amount that's based on the purchase commitment over the length of the lookback period.
" + }, + "EstimatedSavingsPercentage":{ + "shape":"GenericString", + "documentation":"The estimated savings percentage relative to the total cost over the cost calculation lookback period.
" + }, + "EstimatedCommitmentCost":{ + "shape":"GenericString", + "documentation":"The estimated cost of the purchase commitment over the length of the lookback period.
" + }, + "LatestUsageTimestamp":{ + "shape":"GenericString", + "documentation":"The date and time of the last hour that went into the analysis.
" + }, + "UpfrontCost":{ + "shape":"GenericString", + "documentation":"The upfront cost of the Savings Plan, based on the selected payment option.
" + }, + "AdditionalMetadata":{ + "shape":"GenericString", + "documentation":"Additional metadata that might be applicable to the commitment.
" + }, + "MetricsOverLookbackPeriod":{ + "shape":"MetricsOverLookbackPeriod", + "documentation":"The related hourly cost, coverage, and utilization metrics over the lookback period.
" + } + }, + "documentation":"Details about the Savings Plans purchase analysis.
" + }, "SavingsPlansPurchaseRecommendation":{ "type":"structure", "members":{ @@ -4637,6 +5054,18 @@ }, "documentation":"The amount of savings that you're accumulating, against the public On-Demand rate of the usage accrued in an account.
" }, + "SavingsPlansToAdd":{ + "type":"list", + "member":{"shape":"SavingsPlans"}, + "max":1, + "min":1 + }, + "SavingsPlansToExclude":{ + "type":"list", + "member":{"shape":"SavingsPlansId"}, + "max":1000, + "min":0 + }, "SavingsPlansUtilization":{ "type":"structure", "members":{ @@ -4791,6 +5220,38 @@ "DESCENDING" ] }, + "StartCommitmentPurchaseAnalysisRequest":{ + "type":"structure", + "required":["CommitmentPurchaseAnalysisConfiguration"], + "members":{ + "CommitmentPurchaseAnalysisConfiguration":{ + "shape":"CommitmentPurchaseAnalysisConfiguration", + "documentation":"The configuration for the commitment purchase analysis.
" + } + } + }, + "StartCommitmentPurchaseAnalysisResponse":{ + "type":"structure", + "required":[ + "AnalysisId", + "AnalysisStartedTime", + "EstimatedCompletionTime" + ], + "members":{ + "AnalysisId":{ + "shape":"AnalysisId", + "documentation":"The analysis ID that's associated with the commitment purchase analysis.
" + }, + "AnalysisStartedTime":{ + "shape":"ZonedDateTime", + "documentation":"The start time of the analysis.
" + }, + "EstimatedCompletionTime":{ + "shape":"ZonedDateTime", + "documentation":"The estimated time for when the analysis will complete.
" + } + } + }, "StartCostAllocationTagBackfillRequest":{ "type":"structure", "required":["BackfillFrom"], diff --git a/botocore/data/cloudfront/2020-05-31/service-2.json b/botocore/data/cloudfront/2020-05-31/service-2.json index 3685ab76b5..6f29f51d32 100644 --- a/botocore/data/cloudfront/2020-05-31/service-2.json +++ b/botocore/data/cloudfront/2020-05-31/service-2.json @@ -9314,9 +9314,13 @@ "Members":{ "shape":"OriginGroupMembers", "documentation":"A complex type that contains information about the origins in an origin group.
" + }, + "SelectionCriteria":{ + "shape":"OriginGroupSelectionCriteria", + "documentation":"The selection criteria for the origin group. For more information, see Create an origin group in the Amazon CloudFront Developer Guide.
" } }, - "documentation":"An origin group includes two origins (a primary origin and a second origin to failover to) and a failover criteria that you specify. You create an origin group to support origin failover in CloudFront. When you create or update a distribution, you can specify the origin group instead of a single origin, and CloudFront will failover from the primary origin to the second origin under the failover conditions that you've chosen.
" + "documentation":"An origin group includes two origins (a primary origin and a secondary origin to failover to) and a failover criteria that you specify. You create an origin group to support origin failover in CloudFront. When you create or update a distribution, you can specify the origin group instead of a single origin, and CloudFront will failover from the primary origin to the secondary origin under the failover conditions that you've chosen.
Optionally, you can choose selection criteria for your origin group to specify how your origins are selected when your distribution routes viewer requests.
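A hedged sketch of an origin group entry in a DistributionConfig that opts into the new selection criteria; the IDs are hypothetical.

# One origin group entry inside a DistributionConfig's OriginGroups.Items,
# opting into media-quality-based selection; IDs are hypothetical.
origin_group = {
    "Id": "media-origin-group",
    "FailoverCriteria": {"StatusCodes": {"Quantity": 1, "Items": [503]}},
    "Members": {
        "Quantity": 2,
        "Items": [
            {"OriginId": "primary-emp-origin"},
            {"OriginId": "secondary-emp-origin"},
        ],
    },
    "SelectionCriteria": "media-quality-based",
}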
" }, "OriginGroupFailoverCriteria":{ "type":"structure", @@ -9376,6 +9380,13 @@ }, "documentation":"A complex data type for the origins included in an origin group.
" }, + "OriginGroupSelectionCriteria":{ + "type":"string", + "enum":[ + "default", + "media-quality-based" + ] + }, "OriginGroups":{ "type":"structure", "required":["Quantity"], diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index 765509aa2c..c40007a132 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -41,7 +41,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"ConflictException"} ], - "documentation":"Adds one or more tags to a trail, event data store, or channel, up to a limit of 50. Overwrites an existing tag's value when a new value is specified for an existing tag key. Tag key names must be unique; you cannot have two keys with the same name but different values. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail or event data store that applies to all Amazon Web Services Regions only from the Region in which the trail or event data store was created (also known as its home Region).
", + "documentation":"Adds one or more tags to a trail, event data store, dashboard, or channel, up to a limit of 50. Overwrites an existing tag's value when a new value is specified for an existing tag key. Tag key names must be unique; you cannot have two keys with the same name but different values. If you specify a key without a value, the tag will be created with the specified key and a value of null. You can tag a trail or event data store that applies to all Amazon Web Services Regions only from the Region in which the trail or event data store was created (also known as its home Region).
", "idempotent":true }, "CancelQuery":{ @@ -91,6 +91,27 @@ ], "documentation":"Creates a channel for CloudTrail to ingest events from a partner or external source. After you create a channel, a CloudTrail Lake event data store can log events from the partner or source that you specify.
" }, + "CreateDashboard":{ + "name":"CreateDashboard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDashboardRequest"}, + "output":{"shape":"CreateDashboardResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"InvalidTagParameterException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InactiveEventDataStoreException"}, + {"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"InvalidQueryStatementException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Creates a custom dashboard or the Highlights dashboard.
Custom dashboards - Custom dashboards allow you to query events in any event data store type. You can add up to 10 widgets to a custom dashboard. You can manually refresh a custom dashboard, or you can set a refresh schedule.
Highlights dashboard - You can create the Highlights dashboard to see a summary of key user activities and API usage across all your event data stores. CloudTrail Lake manages the Highlights dashboard and refreshes the dashboard every 6 hours. To create the Highlights dashboard, you must set and enable a refresh schedule.
CloudTrail runs queries to populate the dashboard's widgets during a manual or scheduled refresh. CloudTrail must be granted permissions to run the StartQuery operation on your behalf. To provide permissions, run the PutResourcePolicy operation to attach a resource-based policy to each event data store. For more information, see Example: Allow CloudTrail to run queries to populate a dashboard in the CloudTrail User Guide.
To set a refresh schedule, CloudTrail must be granted permissions to run the StartDashboardRefresh operation to refresh the dashboard on your behalf. To provide permissions, run the PutResourcePolicy operation to attach a resource-based policy to the dashboard. For more information, see Resource-based policy example for a dashboard in the CloudTrail User Guide.
For more information about dashboards, see CloudTrail Lake dashboards in the CloudTrail User Guide.
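A minimal boto3 sketch of the CreateDashboard call described above, assuming a botocore release that includes this model; the dashboard name is a hypothetical placeholder.

import boto3

ct = boto3.client("cloudtrail")  # assumes a botocore release with this model

# Create a custom dashboard with termination protection and a six-hour
# refresh schedule; widgets are omitted for brevity (see the RequestWidget
# shape later in this model for their structure).
response = ct.create_dashboard(
    Name="example-dashboard",
    TerminationProtectionEnabled=True,
    RefreshSchedule={
        "Frequency": {"Unit": "HOURS", "Value": 6},
        "Status": "ENABLED",
    },
)
print(response["DashboardArn"], response["Type"])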
", + "idempotent":true + }, "CreateEventDataStore":{ "name":"CreateEventDataStore", "http":{ @@ -183,6 +204,22 @@ ], "documentation":"Deletes a channel.
" }, + "DeleteDashboard":{ + "name":"DeleteDashboard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDashboardRequest"}, + "output":{"shape":"DeleteDashboardResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Deletes the specified dashboard. You cannot delete a dashboard that has termination protection enabled.
", + "idempotent":true + }, "DeleteEventDataStore":{ "name":"DeleteEventDataStore", "http":{ @@ -222,10 +259,11 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourcePolicyNotFoundException"}, {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"ConflictException"}, {"shape":"OperationNotPermittedException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"Deletes the resource-based policy attached to the CloudTrail channel.
", + "documentation":"Deletes the resource-based policy attached to the CloudTrail event data store, dashboard, or channel.
", "idempotent":true }, "DeleteTrail":{ @@ -294,7 +332,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query status. If the query results were delivered to an S3 bucket, the response also provides the S3 URI and the delivery status.
You must specify either a QueryID or a QueryAlias. Specifying the QueryAlias parameter returns information about the last query run for the alias.",
+ "documentation":"Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query status. If the query results were delivered to an S3 bucket, the response also provides the S3 URI and the delivery status. You must specify either QueryId or QueryAlias. Specifying the QueryAlias parameter returns information about the last query run for the alias. You can provide RefreshId along with QueryAlias to view the query results of a dashboard query for the specified RefreshId.",
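A hedged sketch of the DescribeQuery usage described above; the alias and refresh ID are hypothetical placeholders.

import boto3

ct = boto3.client("cloudtrail")

# Inspect the query behind the most recent dashboard refresh.
info = ct.describe_query(
    QueryAlias="example-widget-alias",
    RefreshId="1234567890",
)
print(info["QueryStatus"])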
Returns information about a specific channel.
", "idempotent":true }, + "GetDashboard":{ + "name":"GetDashboard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDashboardRequest"}, + "output":{"shape":"GetDashboardResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Returns the specified dashboard.
", + "idempotent":true + }, "GetEventDataStore":{ "name":"GetEventDataStore", "http":{ @@ -522,7 +575,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"Retrieves the JSON text of the resource-based policy document attached to the CloudTrail channel.
", + "documentation":"Retrieves the JSON text of the resource-based policy document attached to the CloudTrail event data store, dashboard, or channel.
", "idempotent":true }, "GetTrail":{ @@ -577,6 +630,20 @@ "documentation":"Lists the channels in the current account, and their source names.
", "idempotent":true }, + "ListDashboards":{ + "name":"ListDashboards", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDashboardsRequest"}, + "output":{"shape":"ListDashboardsResponse"}, + "errors":[ + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Returns information about all dashboards in the account, in the current Region.
", + "idempotent":true + }, "ListEventDataStores":{ "name":"ListEventDataStores", "http":{ @@ -709,7 +776,7 @@ {"shape":"InvalidTokenException"}, {"shape":"NoManagementAccountSLRExistsException"} ], - "documentation":"Lists the tags for the specified trails, event data stores, or channels in the current Region.
", + "documentation":"Lists the tags for the specified trails, event data stores, dashboards, or channels in the current Region.
", "idempotent":true }, "ListTrails":{ @@ -814,10 +881,11 @@ {"shape":"ResourcePolicyNotValidException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ResourceTypeNotSupportedException"}, + {"shape":"ConflictException"}, {"shape":"OperationNotPermittedException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"Attaches a resource-based permission policy to a CloudTrail channel that is used for an integration with an event source outside of Amazon Web Services. For more information about resource-based policies, see CloudTrail resource-based policy examples in the CloudTrail User Guide.
", + "documentation":"Attaches a resource-based permission policy to a CloudTrail event data store, dashboard, or channel. For more information about resource-based policies, see CloudTrail resource-based policy examples in the CloudTrail User Guide.
", "idempotent":true }, "RegisterOrganizationDelegatedAdmin":{ @@ -871,7 +939,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"ConflictException"} ], - "documentation":"Removes the specified tags from a trail, event data store, or channel.
", + "documentation":"Removes the specified tags from a trail, event data store, dashboard, or channel.
", "idempotent":true }, "RestoreEventDataStore":{ @@ -899,6 +967,24 @@ ], "documentation":"Restores a deleted event data store specified by EventDataStore
, which accepts an event data store ARN. You can only restore a deleted event data store within the seven-day wait period after deletion. Restoring an event data store can take several minutes, depending on the size of the event data store.
Starts a refresh of the specified dashboard.
Each time a dashboard is refreshed, CloudTrail runs queries to populate the dashboard's widgets. CloudTrail must be granted permissions to run the StartQuery operation on your behalf. To provide permissions, run the PutResourcePolicy operation to attach a resource-based policy to each event data store. For more information, see Example: Allow CloudTrail to run queries to populate a dashboard in the CloudTrail User Guide.
Updates a channel specified by a required channel ARN or UUID.
", "idempotent":true }, + "UpdateDashboard":{ + "name":"UpdateDashboard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDashboardRequest"}, + "output":{"shape":"UpdateDashboardResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InactiveEventDataStoreException"}, + {"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"InvalidQueryStatementException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Updates the specified dashboard.
To set a refresh schedule, CloudTrail must be granted permissions to run the StartDashboardRefresh operation to refresh the dashboard on your behalf. To provide permissions, run the PutResourcePolicy operation to attach a resource-based policy to the dashboard. For more information, see Resource-based policy example for a dashboard in the CloudTrail User Guide.
CloudTrail runs queries to populate the dashboard's widgets during a manual or scheduled refresh. CloudTrail must be granted permissions to run the StartQuery operation on your behalf. To provide permissions, run the PutResourcePolicy operation to attach a resource-based policy to each event data store. For more information, see Example: Allow CloudTrail to run queries to populate a dashboard in the CloudTrail User Guide.
Specifies the ARN of the trail, event data store, or channel to which one or more tags will be added.
The format of a trail ARN is: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The format of an event data store ARN is: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
The format of a channel ARN is: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
",
+ "documentation":"Specifies the ARN of the trail, event data store, dashboard, or channel to which one or more tags will be added.
The format of a trail ARN is: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The format of an event data store ARN is: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
The format of a dashboard ARN is: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
The format of a channel ARN is: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
Contains a list of tags, up to a limit of 50
" } }, - "documentation":"Specifies the tags to add to a trail, event data store, or channel.
" + "documentation":"Specifies the tags to add to a trail, event data store, dashboard, or channel.
" }, "AddTagsResponse":{ "type":"structure", @@ -1310,6 +1417,10 @@ "QueryId":{ "shape":"UUID", "documentation":"The ID of the query that you want to cancel. The QueryId
comes from the response of a StartQuery
operation.
The account ID of the event data store owner.
" } } }, @@ -1327,6 +1438,10 @@ "QueryStatus":{ "shape":"QueryStatus", "documentation":"Shows the status of a query after a CancelQuery
request. Typically, the values shown are either RUNNING
or CANCELLED
.
The account ID of the event data store owner.
" } } }, @@ -1406,7 +1521,7 @@ "type":"structure", "members":{ }, - "documentation":"This exception is thrown when an operation is called with an ARN that is not valid.
The following is the format of a trail ARN: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
This exception is thrown when an operation is called with an ARN that is not valid.
The following is the format of a trail ARN: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
The following is the format of a dashboard ARN: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
The name of the dashboard. The name must be unique to your account.
To create the Highlights dashboard, the name must be AWSCloudTrail-Highlights
.
The refresh schedule configuration for the dashboard.
To create the Highlights dashboard, you must set a refresh schedule and set the Status
to ENABLED
. The Unit
for the refresh schedule must be HOURS
and the Value
must be 6
.
Specifies whether termination protection is enabled for the dashboard. If termination protection is enabled, you cannot delete the dashboard until termination protection is disabled.
" + }, + "Widgets":{ + "shape":"RequestWidgetList", + "documentation":"An array of widgets for a custom dashboard. A custom dashboard can have a maximum of ten widgets.
You do not need to specify widgets for the Highlights dashboard.
" + } + } + }, + "CreateDashboardResponse":{ + "type":"structure", + "members":{ + "DashboardArn":{ + "shape":"DashboardArn", + "documentation":"The ARN for the dashboard.
" + }, + "Name":{ + "shape":"DashboardName", + "documentation":"The name of the dashboard.
" + }, + "Type":{ + "shape":"DashboardType", + "documentation":"The dashboard type.
" + }, + "Widgets":{ + "shape":"WidgetList", + "documentation":"An array of widgets for the dashboard.
" + }, + "TagsList":{"shape":"TagsList"}, + "RefreshSchedule":{ + "shape":"RefreshSchedule", + "documentation":"The refresh schedule for the dashboard, if configured.
" + }, + "TerminationProtectionEnabled":{ + "shape":"TerminationProtectionEnabled", + "documentation":"Indicates whether termination protection is enabled for the dashboard.
" + } + } + }, "CreateEventDataStoreRequest":{ "type":"structure", "required":["Name"], @@ -1700,6 +1868,51 @@ }, "documentation":"Returns the objects or data listed below if successful. Otherwise, returns an error.
" }, + "DashboardArn":{ + "type":"string", + "pattern":"^[a-zA-Z0-9._/\\-:]+$" + }, + "DashboardDetail":{ + "type":"structure", + "members":{ + "DashboardArn":{ + "shape":"DashboardArn", + "documentation":"The ARN for the dashboard.
" + }, + "Type":{ + "shape":"DashboardType", + "documentation":"The type of dashboard.
" + } + }, + "documentation":"Provides information about a CloudTrail Lake dashboard.
" + }, + "DashboardName":{ + "type":"string", + "max":128, + "min":3, + "pattern":"^[a-zA-Z0-9_\\-]+$" + }, + "DashboardStatus":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "UPDATING", + "UPDATED", + "DELETING" + ] + }, + "DashboardType":{ + "type":"string", + "enum":[ + "MANAGED", + "CUSTOM" + ] + }, + "Dashboards":{ + "type":"list", + "member":{"shape":"DashboardDetail"} + }, "DataResource":{ "type":"structure", "members":{ @@ -1745,6 +1958,21 @@ "members":{ } }, + "DeleteDashboardRequest":{ + "type":"structure", + "required":["DashboardId"], + "members":{ + "DashboardId":{ + "shape":"DashboardArn", + "documentation":"The name or ARN for the dashboard.
" + } + } + }, + "DeleteDashboardResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEventDataStoreRequest":{ "type":"structure", "required":["EventDataStore"], @@ -1766,7 +1994,7 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":" The Amazon Resource Name (ARN) of the CloudTrail channel you're deleting the resource-based policy from. The following is the format of a resource ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/MyChannel
.",
+ "documentation":"The Amazon Resource Name (ARN) of the CloudTrail event data store, dashboard, or channel you're deleting the resource-based policy from.
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example dashboard ARN format: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
The alias that identifies a query template.
" + }, + "RefreshId":{ + "shape":"RefreshId", + "documentation":"The ID of the dashboard refresh.
" + }, + "EventDataStoreOwnerAccountId":{ + "shape":"AccountId", + "documentation":"The account ID of the event data store owner.
" } } }, @@ -1881,6 +2117,10 @@ "Prompt":{ "shape":"Prompt", "documentation":"The prompt used for a generated query. For information about generated queries, see Create CloudTrail Lake queries from natural language prompts in the CloudTrail user guide.
" + }, + "EventDataStoreOwnerAccountId":{ + "shape":"AccountId", + "documentation":"The account ID of the event data store owner.
" } } }, @@ -2290,6 +2530,10 @@ "QueryAlias":{ "shape":"QueryAlias", "documentation":" An alias that identifies the prompt. When you run the StartQuery
operation, you can pass in either the QueryAlias
or QueryStatement
parameter.
The account ID of the event data store owner.
" } } }, @@ -2339,6 +2583,61 @@ } } }, + "GetDashboardRequest":{ + "type":"structure", + "required":["DashboardId"], + "members":{ + "DashboardId":{ + "shape":"DashboardArn", + "documentation":"The name or ARN for the dashboard.
" + } + } + }, + "GetDashboardResponse":{ + "type":"structure", + "members":{ + "DashboardArn":{ + "shape":"DashboardArn", + "documentation":"The ARN for the dashboard.
" + }, + "Type":{ + "shape":"DashboardType", + "documentation":"The type of dashboard.
" + }, + "Status":{ + "shape":"DashboardStatus", + "documentation":"The status of the dashboard.
" + }, + "Widgets":{ + "shape":"WidgetList", + "documentation":"An array of widgets for the dashboard.
" + }, + "RefreshSchedule":{ + "shape":"RefreshSchedule", + "documentation":"The refresh schedule for the dashboard, if configured.
" + }, + "CreatedTimestamp":{ + "shape":"Date", + "documentation":"The timestamp that shows when the dashboard was created.
" + }, + "UpdatedTimestamp":{ + "shape":"Date", + "documentation":"The timestamp that shows when the dashboard was last updated.
" + }, + "LastRefreshId":{ + "shape":"RefreshId", + "documentation":"The ID of the last dashboard refresh.
" + }, + "LastRefreshFailureReason":{ + "shape":"ErrorMessage", + "documentation":"Provides information about failures for the last scheduled refresh.
" + }, + "TerminationProtectionEnabled":{ + "shape":"TerminationProtectionEnabled", + "documentation":"Indicates whether termination protection is enabled for the dashboard.
" + } + } + }, "GetEventDataStoreRequest":{ "type":"structure", "required":["EventDataStore"], @@ -2547,6 +2846,10 @@ "MaxQueryResults":{ "shape":"MaxQueryResults", "documentation":"The maximum number of query results to display on a single page.
" + }, + "EventDataStoreOwnerAccountId":{ + "shape":"AccountId", + "documentation":"The account ID of the event data store owner.
" } } }, @@ -2581,7 +2884,7 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":" The Amazon Resource Name (ARN) of the CloudTrail channel attached to the resource-based policy. The following is the format of a resource ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/MyChannel
.",
+ "documentation":"The Amazon Resource Name (ARN) of the CloudTrail event data store, dashboard, or channel attached to the resource-based policy.
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example dashboard ARN format: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
The Amazon Resource Name (ARN) of the CloudTrail channel attached to resource-based policy.
" + "documentation":"The Amazon Resource Name (ARN) of the CloudTrail event data store, dashboard, or channel attached to resource-based policy.
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example dashboard ARN format: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
A JSON-formatted string that contains the resource-based policy attached to the CloudTrail channel.
" + "documentation":"A JSON-formatted string that contains the resource-based policy attached to the CloudTrail event data store, dashboard, or channel.
" + }, + "DelegatedAdminResourcePolicy":{ + "shape":"ResourcePolicy", + "documentation":"The default resource-based policy that is automatically generated for the delegated administrator of an Organizations organization. This policy will be evaluated in tandem with any policy you submit for the resource. For more information about this policy, see Default resource policy for delegated administrators.
" } } }, @@ -2620,7 +2927,7 @@ "members":{ "Name":{ "shape":"String", - "documentation":"Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another Region), you must specify its ARN. The following is the format of a trail ARN.
arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another Region), you must specify its ARN.
The following is the format of a trail ARN: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
If the trail is an organization trail and you are a member account in the organization in Organizations, you must provide the full ARN of that trail, and not just the name.
The name of a trail about which you want the current status.
" @@ -2932,7 +3239,7 @@ "type":"structure", "members":{ }, - "documentation":"This exception is thrown when the policy on the S3 bucket or KMS key does not have sufficient permissions for the operation.
", + "documentation":"For the CreateTrail
, PutInsightSelectors, UpdateTrail, StartQuery, and StartImport operations, this exception is thrown when the policy on the S3 bucket or KMS key does not have sufficient permissions for the operation.
For all other operations, this exception is thrown when the policy for the KMS key does not have sufficient permissions for the operation.
", "exception":true }, "InsufficientS3BucketPolicyException":{ @@ -3185,6 +3492,45 @@ } } }, + "ListDashboardsMaxResultsCount":{ + "type":"integer", + "max":1000, + "min":1 + }, + "ListDashboardsRequest":{ + "type":"structure", + "members":{ + "NamePrefix":{ + "shape":"DashboardName", + "documentation":"Specify a name prefix to filter on.
" + }, + "Type":{ + "shape":"DashboardType", + "documentation":" Specify a dashboard type to filter on: CUSTOM
or MANAGED
.
A token you can use to get the next page of dashboard results.
" + }, + "MaxResults":{ + "shape":"ListDashboardsMaxResultsCount", + "documentation":"The maximum number of dashboards to display on a single page.
" + } + } + }, + "ListDashboardsResponse":{ + "type":"structure", + "members":{ + "Dashboards":{ + "shape":"Dashboards", + "documentation":"Contains information about dashboards in the account, in the current Region that match the applied filters.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"A token you can use to get the next page of dashboard results.
" + } + } + }, "ListEventDataStoresMaxResultsCount":{ "type":"integer", "max":1000, @@ -3329,11 +3675,11 @@ }, "DataType":{ "shape":"InsightsMetricDataType", - "documentation":"Type of datapoints to return. Valid values are NonZeroData
and FillWithZeros. The default is NonZeroData.",
+ "documentation":"Type of data points to return. Valid values are NonZeroData and FillWithZeros. The default is NonZeroData
.
The maximum number of datapoints to return. Valid values are integers from 1 to 21600. The default value is 21600.
" + "documentation":"The maximum number of data points to return. Valid values are integers from 1 to 21600. The default value is 21600.
" }, "NextToken":{ "shape":"InsightsMetricNextToken", @@ -3460,7 +3806,7 @@ "members":{ "ResourceIdList":{ "shape":"ResourceIdList", - "documentation":"Specifies a list of trail, event data store, or channel ARNs whose tags will be listed. The list has a limit of 20 ARNs.
Example trail ARN format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
",
+ "documentation":"Specifies a list of trail, event data store, dashboard, or channel ARNs whose tags will be listed. The list has a limit of 20 ARNs.
Example trail ARN format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example dashboard ARN format: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
The Amazon Resource Name (ARN) of the CloudTrail channel attached to the resource-based policy. The following is the format of a resource ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/MyChannel
.",
+ "documentation":"The Amazon Resource Name (ARN) of the CloudTrail event data store, dashboard, or channel attached to the resource-based policy.
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example dashboard ARN format: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
A JSON-formatted string for an Amazon Web Services resource-based policy.
The following are requirements for the resource policy:
Contains only one action: cloudtrail-data:PutAuditEvents
Contains at least one statement. The policy can have a maximum of 20 statements.
Each statement contains at least one principal. A statement can have a maximum of 50 principals.
",
+ "documentation":"A JSON-formatted string for an Amazon Web Services resource-based policy.
For example resource-based policies, see CloudTrail resource-based policy examples in the CloudTrail User Guide.
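A hedged sketch of attaching such a policy to a dashboard with PutResourcePolicy; the ARN and the policy wording are illustrative only, not the documented example policy.

import json
import boto3

ct = boto3.client("cloudtrail")

# Allow CloudTrail to refresh the dashboard on a schedule; the ARN and the
# statement below are illustrative placeholders.
dashboard_arn = "arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash"
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"Service": "cloudtrail.amazonaws.com"},
            "Action": "cloudtrail:StartDashboardRefresh",
            "Resource": dashboard_arn,
        }
    ],
}
ct.put_resource_policy(ResourceArn=dashboard_arn, ResourcePolicy=json.dumps(policy))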
" } } }, @@ -3847,11 +4193,15 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":"The Amazon Resource Name (ARN) of the CloudTrail channel attached to the resource-based policy.
" + "documentation":"The Amazon Resource Name (ARN) of the CloudTrail event data store, dashboard, or channel attached to the resource-based policy.
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example dashboard ARN format: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
The JSON-formatted string of the Amazon Web Services resource-based policy attached to the CloudTrail channel.
" + "documentation":"The JSON-formatted string of the Amazon Web Services resource-based policy attached to the CloudTrail event data store, dashboard, or channel.
" + }, + "DelegatedAdminResourcePolicy":{ + "shape":"ResourcePolicy", + "documentation":"The default resource-based policy that is automatically generated for the delegated administrator of an Organizations organization. This policy will be evaluated in tandem with any policy you submit for the resource. For more information about this policy, see Default resource policy for delegated administrators.
" } } }, @@ -3896,6 +4246,23 @@ "min":1, "pattern":".*" }, + "QueryParameterKey":{ + "type":"string", + "max":128, + "min":3, + "pattern":"^[a-zA-Z0-9._/\\-:$]+$" + }, + "QueryParameterValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9._/\\-:]+$" + }, + "QueryParameterValues":{ + "type":"map", + "key":{"shape":"QueryParameterKey"}, + "value":{"shape":"QueryParameterValue"} + }, "QueryParameters":{ "type":"list", "member":{"shape":"QueryParameter"}, @@ -3986,6 +4353,59 @@ "All" ] }, + "RefreshId":{ + "type":"string", + "max":20, + "min":10, + "pattern":"\\d+" + }, + "RefreshSchedule":{ + "type":"structure", + "members":{ + "Frequency":{ + "shape":"RefreshScheduleFrequency", + "documentation":"The frequency at which you want the dashboard refreshed.
" + }, + "Status":{ + "shape":"RefreshScheduleStatus", + "documentation":" Specifies whether the refresh schedule is enabled. Set the value to ENABLED
to enable the refresh schedule, or to DISABLED
to turn off the refresh schedule.
The time of day in UTC to run the schedule; for hourly only refer to minutes; default is 00:00.
" + } + }, + "documentation":"The schedule for a dashboard refresh.
" + }, + "RefreshScheduleFrequency":{ + "type":"structure", + "members":{ + "Unit":{ + "shape":"RefreshScheduleFrequencyUnit", + "documentation":"The unit to use for the refresh.
For custom dashboards, the unit can be HOURS
or DAYS
.
For the Highlights dashboard, the Unit
must be HOURS
.
The value for the refresh schedule.
For custom dashboards, the following values are valid when the unit is HOURS
: 1
, 6
, 12
, 24
For custom dashboards, the only valid value when the unit is DAYS
is 1
.
For the Highlights dashboard, the Value
must be 6
.
Specifies the frequency for a dashboard refresh schedule.
For a custom dashboard, you can schedule a refresh for every 1, 6, 12, or 24 hours, or every day.
" + }, + "RefreshScheduleFrequencyUnit":{ + "type":"string", + "enum":[ + "HOURS", + "DAYS" + ] + }, + "RefreshScheduleFrequencyValue":{"type":"integer"}, + "RefreshScheduleStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "RegisterOrganizationDelegatedAdminRequest":{ "type":"structure", "required":["MemberAccountId"], @@ -4012,14 +4432,14 @@ "members":{ "ResourceId":{ "shape":"String", - "documentation":"Specifies the ARN of the trail, event data store, or channel from which tags should be removed.
Example trail ARN format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
Specifies the ARN of the trail, event data store, dashboard, or channel from which tags should be removed.
Example trail ARN format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
Example event data store ARN format: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE
Example dashboard ARN format: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash
Example channel ARN format: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
Specifies a list of tags to be removed.
" } }, - "documentation":"Specifies the tags to remove from a trail, event data store, or channel.
" + "documentation":"Specifies the tags to remove from a trail, event data store, dashboard, or channel.
" }, "RemoveTagsResponse":{ "type":"structure", @@ -4027,6 +4447,32 @@ }, "documentation":"Returns the objects or data listed below if successful. Otherwise, returns an error.
" }, + "RequestWidget":{ + "type":"structure", + "required":[ + "QueryStatement", + "ViewProperties" + ], + "members":{ + "QueryStatement":{ + "shape":"QueryStatement", + "documentation":"The query statement for the widget. For custom dashboard widgets, you can query across multiple event data stores as long as all event data stores exist in your account.
When a query uses ?
with eventTime
, ?
must be surrounded by single quotes as follows: '?'
.
The optional query parameters. The following query parameters are valid: $StartTime$
, $EndTime$
, and $Period$
.
The view properties for the widget. For more information about view properties, see View properties for widgets in the CloudTrail User Guide.
" + } + }, + "documentation":"Contains information about a widget on a CloudTrail Lake dashboard.
" + }, + "RequestWidgetList":{ + "type":"list", + "member":{"shape":"RequestWidget"} + }, "Resource":{ "type":"structure", "members":{ @@ -4045,7 +4491,7 @@ "type":"structure", "members":{ }, - "documentation":" This exception is thrown when the provided resource does not exist, or the ARN format of the resource is not valid. The following is the valid format for a resource ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/MyChannel
.
This exception is thrown when the provided resource does not exist, or the ARN format of the resource is not valid. The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE The following is the format of a dashboard ARN: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890
This exception is thrown when the resource-based policy has syntax errors, or contains a principal that is not valid. The following are requirements for the resource policy: Contains only one action: cloudtrail-data:PutAuditEvents Contains at least one statement. The policy can have a maximum of 20 statements. Each statement contains at least one principal. A statement can have a maximum of 50 principals.
This exception is thrown when the resource-based policy has syntax errors, or contains a principal that is not valid.
", "exception":true }, "ResourceTag":{ @@ -4224,6 +4670,13 @@ "min":0, "pattern":".*" }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"This exception is thrown when the quota is exceeded. For information about CloudTrail quotas, see Service quotas in the Amazon Web Services General Reference.
", + "exception":true + }, "Source":{ "type":"string", "max":256, @@ -4244,6 +4697,29 @@ }, "documentation":"Contains configuration information about the channel.
" }, + "StartDashboardRefreshRequest":{ + "type":"structure", + "required":["DashboardId"], + "members":{ + "DashboardId":{ + "shape":"DashboardArn", + "documentation":"The name or ARN of the dashboard.
" + }, + "QueryParameterValues":{ + "shape":"QueryParameterValues", + "documentation":"The query parameter values for the dashboard
For custom dashboards, the following query parameters are valid: $StartTime$
, $EndTime$
, and $Period$
.
For managed dashboards, the following query parameters are valid: $StartTime$
, $EndTime$
, $Period$
, and $EventDataStoreId$
. The $EventDataStoreId$
query parameter is required.
The refresh ID for the dashboard.
" + } + } + }, "StartEventDataStoreIngestionRequest":{ "type":"structure", "required":["EventDataStore"], @@ -4356,6 +4832,10 @@ "QueryParameters":{ "shape":"QueryParameters", "documentation":" The query parameters for the specified QueryAlias
.
The account ID of the event data store owner.
" } } }, @@ -4365,6 +4845,10 @@ "QueryId":{ "shape":"UUID", "documentation":"The ID of the started query.
" + }, + "EventDataStoreOwnerAccountId":{ + "shape":"AccountId", + "documentation":"The account ID of the event data store owner.
" } } }, @@ -4465,7 +4949,7 @@ "documentation":"The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters.
" } }, - "documentation":"A custom key-value pair associated with a resource such as a CloudTrail trail, event data store, or channel.
" + "documentation":"A custom key-value pair associated with a resource such as a CloudTrail trail, event data store, dashboard, or channel.
" }, "TagKey":{ "type":"string", @@ -4481,7 +4965,7 @@ "type":"structure", "members":{ }, - "documentation":"The number of tags per trail, event data store, or channel has exceeded the permitted amount. Currently, the limit is 50.
", + "documentation":"The number of tags per trail, event data store, dashboard, or channel has exceeded the permitted amount. Currently, the limit is 50.
", "exception":true }, "TagsList":{ @@ -4498,6 +4982,10 @@ "documentation":"This exception is thrown when the request rate exceeds the limit.
", "exception":true }, + "TimeOfDay":{ + "type":"string", + "pattern":"^[0-9]{2}:[0-9]{2}" + }, "Timestamps":{ "type":"list", "member":{"shape":"Date"} @@ -4676,6 +5164,65 @@ } } }, + "UpdateDashboardRequest":{ + "type":"structure", + "required":["DashboardId"], + "members":{ + "DashboardId":{ + "shape":"DashboardArn", + "documentation":"The name or ARN of the dashboard.
" + }, + "Widgets":{ + "shape":"RequestWidgetList", + "documentation":"An array of widgets for the dashboard. A custom dashboard can have a maximum of 10 widgets.
To add new widgets, pass in an array that includes the existing widgets along with any new widgets. Run the GetDashboard operation to get the list of widgets for the dashboard. To remove widgets, pass in an array that includes the existing widgets minus the widgets you want removed. (A usage sketch follows the UpdateDashboardResponse definition below.)
" + }, + "RefreshSchedule":{ + "shape":"RefreshSchedule", + "documentation":"The refresh schedule configuration for the dashboard.
" + }, + "TerminationProtectionEnabled":{ + "shape":"TerminationProtectionEnabled", + "documentation":"Specifies whether termination protection is enabled for the dashboard. If termination protection is enabled, you cannot delete the dashboard until termination protection is disabled.
" + } + } + }, + "UpdateDashboardResponse":{ + "type":"structure", + "members":{ + "DashboardArn":{ + "shape":"DashboardArn", + "documentation":"The ARN for the dashboard.
" + }, + "Name":{ + "shape":"DashboardName", + "documentation":"The name for the dashboard.
" + }, + "Type":{ + "shape":"DashboardType", + "documentation":"The type of dashboard.
" + }, + "Widgets":{ + "shape":"WidgetList", + "documentation":"An array of widgets for the dashboard.
" + }, + "RefreshSchedule":{ + "shape":"RefreshSchedule", + "documentation":"The refresh schedule for the dashboard, if configured.
" + }, + "TerminationProtectionEnabled":{ + "shape":"TerminationProtectionEnabled", + "documentation":"Indicates whether termination protection is enabled for the dashboard.
" + }, + "CreatedTimestamp":{ + "shape":"Date", + "documentation":"The timestamp that shows when the dashboard was created.
" + }, + "UpdatedTimestamp":{ + "shape":"Date", + "documentation":"The timestamp that shows when the dashboard was updated.
" + } + } + }, "UpdateEventDataStoreRequest":{ "type":"structure", "required":["EventDataStore"], @@ -4888,6 +5435,49 @@ } }, "documentation":"Returns the objects or data listed below if successful. Otherwise, returns an error.
" + }, + "ViewPropertiesKey":{ + "type":"string", + "max":128, + "min":3, + "pattern":"^[a-zA-Z0-9._\\-]+$" + }, + "ViewPropertiesMap":{ + "type":"map", + "key":{"shape":"ViewPropertiesKey"}, + "value":{"shape":"ViewPropertiesValue"} + }, + "ViewPropertiesValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9._\\- ]+$" + }, + "Widget":{ + "type":"structure", + "members":{ + "QueryAlias":{ + "shape":"QueryAlias", + "documentation":"The query alias used to identify the query for the widget.
" + }, + "QueryStatement":{ + "shape":"QueryStatement", + "documentation":"The SQL query statement for the widget.
" + }, + "QueryParameters":{ + "shape":"QueryParameters", + "documentation":"The query parameters for the widget.
" + }, + "ViewProperties":{ + "shape":"ViewPropertiesMap", + "documentation":"The view properties for the widget. For more information about view properties, see View properties for widgets in the CloudTrail User Guide..
" + } + }, + "documentation":"A widget on a CloudTrail Lake dashboard.
" + }, + "WidgetList":{ + "type":"list", + "member":{"shape":"Widget"} } }, "documentation":"This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail.
CloudTrail is a web service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start time of the Amazon Web Services API call, the source IP address, the request parameters, and the response elements returned by the service.
As an alternative to the API, you can use one of the Amazon Web Services SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide programmatic access to CloudTrail. For example, the SDKs handle cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools to Build on Amazon Web Services.
See the CloudTrail User Guide for information about the data that is included with each Amazon Web Services API call listed in the log files.
" diff --git a/botocore/data/ec2/2016-11-15/paginators-1.json b/botocore/data/ec2/2016-11-15/paginators-1.json index dd8661242a..db522f58b6 100644 --- a/botocore/data/ec2/2016-11-15/paginators-1.json +++ b/botocore/data/ec2/2016-11-15/paginators-1.json @@ -871,6 +871,18 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "SecurityGroupVpcAssociations" + }, + "DescribeCapacityBlockExtensionHistory": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CapacityBlockExtensions" + }, + "DescribeCapacityBlockExtensionOfferings": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CapacityBlockExtensionOfferings" } } } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 2e7b33a6fe..01fe5b082f 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -470,7 +470,7 @@ }, "input":{"shape":"CancelCapacityReservationRequest"}, "output":{"shape":"CancelCapacityReservationResult"}, - "documentation":"Cancels the specified Capacity Reservation, releases the reserved capacity, and changes the Capacity Reservation's state to cancelled
.
Instances running in the reserved capacity continue running until you stop them. Stopped instances that target the Capacity Reservation can no longer launch. Modify these instances to either target a different Capacity Reservation, launch On-Demand Instance capacity, or run in any open Capacity Reservation that has matching attributes and sufficient capacity.
" + "documentation":"Cancels the specified Capacity Reservation, releases the reserved capacity, and changes the Capacity Reservation's state to cancelled
.
You can cancel a Capacity Reservation that is in the following states: assessing; or active, when there is no commitment duration or the commitment duration has elapsed. You can't cancel a future-dated Capacity Reservation during the commitment duration. If a future-dated Capacity Reservation enters the delayed state, the commitment duration is waived, and you can cancel it as soon as it enters the active state.
Instances running in the reserved capacity continue running until you stop them. Stopped instances that target the Capacity Reservation can no longer launch. Modify these instances to either target a different Capacity Reservation, launch On-Demand Instance capacity, or run in any open Capacity Reservation that has matching attributes and sufficient capacity.
" }, "CancelCapacityReservationFleets":{ "name":"CancelCapacityReservationFleets", @@ -598,7 +598,7 @@ }, "input":{"shape":"CreateCapacityReservationRequest"}, "output":{"shape":"CreateCapacityReservationResult"}, - "documentation":"Creates a new Capacity Reservation with the specified attributes.
Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This gives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. By creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. For more information, see Capacity Reservations in the Amazon EC2 User Guide.
Your request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to fulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try again at a later time, try in a different Availability Zone, or request a smaller capacity reservation. If your application is flexible across instance types and sizes, try to create a Capacity Reservation with different instance attributes.
Your request could also fail if the requested quantity exceeds your On-Demand Instance limit for the selected instance type. If your request fails due to limit constraints, increase your On-Demand Instance limit for the required instance type and try again. For more information about increasing your instance limits, see Amazon EC2 Service Quotas in the Amazon EC2 User Guide.
" + "documentation":"Creates a new Capacity Reservation with the specified attributes. Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration.
You can create a Capacity Reservation at any time, and you can choose when it starts. You can create a Capacity Reservation for immediate use or you can request a Capacity Reservation for a future date.
For more information, see Reserve compute capacity with On-Demand Capacity Reservations in the Amazon EC2 User Guide.
Your request to create a Capacity Reservation could fail if:
Amazon EC2 does not have sufficient capacity. In this case, try again at a later time, try in a different Availability Zone, or request a smaller Capacity Reservation. If your workload is flexible across instance types and sizes, try with different instance attributes.
The requested quantity exceeds your On-Demand Instance quota. In this case, increase your On-Demand Instance quota for the requested instance type and try again. For more information, see Amazon EC2 Service Quotas in the Amazon EC2 User Guide.
Describes the IP address ranges that were specified in calls to ProvisionByoipCidr.
To describe the address pools that were created when you provisioned the address ranges, use DescribePublicIpv4Pools or DescribeIpv6Pools.
" }, + "DescribeCapacityBlockExtensionHistory":{ + "name":"DescribeCapacityBlockExtensionHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCapacityBlockExtensionHistoryRequest"}, + "output":{"shape":"DescribeCapacityBlockExtensionHistoryResult"}, + "documentation":"Describes the events for the specified Capacity Block extension during the specified time.
" + }, + "DescribeCapacityBlockExtensionOfferings":{ + "name":"DescribeCapacityBlockExtensionOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeCapacityBlockExtensionOfferingsRequest"}, + "output":{"shape":"DescribeCapacityBlockExtensionOfferingsResult"}, + "documentation":"Describes Capacity Block extension offerings available for purchase in the Amazon Web Services Region that you're currently using.
" + }, "DescribeCapacityBlockOfferings":{ "name":"DescribeCapacityBlockOfferings", "http":{ @@ -3705,7 +3725,7 @@ }, "input":{"shape":"DescribeVpcBlockPublicAccessOptionsRequest"}, "output":{"shape":"DescribeVpcBlockPublicAccessOptionsResult"}, - "documentation":"Describe VPC Block Public Access (BPA) options. VPC Block public Access (BPA) enables you to block resources in VPCs and subnets that you own in a Region from reaching or being reached from the internet through internet gateways and egress-only internet gateways. To learn more about VPC BPA, see Block public access to VPCs and subnets in the Amazon VPC User Guide.
" + "documentation":"Describe VPC Block Public Access (BPA) options. VPC Block Public Access (BPA) enables you to block resources in VPCs and subnets that you own in a Region from reaching or being reached from the internet through internet gateways and egress-only internet gateways. To learn more about VPC BPA, see Block public access to VPCs and subnets in the Amazon VPC User Guide.
" }, "DescribeVpcClassicLink":{ "name":"DescribeVpcClassicLink", @@ -5037,7 +5057,7 @@ }, "input":{"shape":"ModifyCapacityReservationRequest"}, "output":{"shape":"ModifyCapacityReservationResult"}, - "documentation":"Modifies a Capacity Reservation's capacity, instance eligibility, and the conditions under which it is to be released. You can't modify a Capacity Reservation's instance type, EBS optimization, platform, instance store settings, Availability Zone, or tenancy. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with the required attributes. For more information, see Modify an active Capacity Reservation.
" + "documentation":"Modifies a Capacity Reservation's capacity, instance eligibility, and the conditions under which it is to be released. You can't modify a Capacity Reservation's instance type, EBS optimization, platform, instance store settings, Availability Zone, or tenancy. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, and then create a new one with the required attributes. For more information, see Modify an active Capacity Reservation.
The allowed modifications depend on the state of the Capacity Reservation:
assessing or scheduled state - You can modify the tags only.
pending state - You can't modify the Capacity Reservation in any way.
active state but still within the commitment duration - You can't decrease the instance count or set an end date that is within the commitment duration. All other modifications are allowed.
active state with no commitment duration or elapsed commitment duration - All modifications are allowed.
expired, cancelled, unsupported, or failed state - You can't modify the Capacity Reservation in any way.
Modify VPC Block Public Access (BPA) options. VPC Block public Access (BPA) enables you to block resources in VPCs and subnets that you own in a Region from reaching or being reached from the internet through internet gateways and egress-only internet gateways. To learn more about VPC BPA, see Block public access to VPCs and subnets in the Amazon VPC User Guide.
" + "documentation":"Modify VPC Block Public Access (BPA) options. VPC Block Public Access (BPA) enables you to block resources in VPCs and subnets that you own in a Region from reaching or being reached from the internet through internet gateways and egress-only internet gateways. To learn more about VPC BPA, see Block public access to VPCs and subnets in the Amazon VPC User Guide.
" }, "ModifyVpcEndpoint":{ "name":"ModifyVpcEndpoint", @@ -5770,6 +5790,16 @@ "output":{"shape":"PurchaseCapacityBlockResult"}, "documentation":"Purchase the Capacity Block for use with your account. With Capacity Blocks you ensure GPU capacity is available for machine learning (ML) workloads. You must specify the ID of the Capacity Block offering you are purchasing.
" }, + "PurchaseCapacityBlockExtension":{ + "name":"PurchaseCapacityBlockExtension", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PurchaseCapacityBlockExtensionRequest"}, + "output":{"shape":"PurchaseCapacityBlockExtensionResult"}, + "documentation":"Purchase the Capacity Block extension for use with your account. You must specify the ID of the Capacity Block extension offering you are purchasing.
" + }, "PurchaseHostReservation":{ "name":"PurchaseHostReservation", "http":{ @@ -6935,6 +6965,22 @@ "max":50, "min":0 }, + "AddIpamOrganizationalUnitExclusion":{ + "type":"structure", + "members":{ + "OrganizationsEntityPath":{ + "shape":"String", + "documentation":"An Amazon Web Services Organizations entity path. Build the path for the OU(s) using Amazon Web Services Organizations IDs separated by a /
. Include all child OUs by ending the path with /*.
Example 1 - Path to a child OU: o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-ghi0-awsccccc/ou-jkl0-awsddddd/ In this example, o-a1b2c3d4e5 is the organization ID, r-f6g7h8i9j0example is the root ID, ou-ghi0-awsccccc is an OU ID, and ou-jkl0-awsddddd is a child OU ID. IPAM will not manage the IP addresses in accounts in the child OU.
Example 2 - Path where all child OUs will be part of the exclusion: o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-ghi0-awsccccc/* In this example, IPAM will not manage the IP addresses in accounts in the OU (ou-ghi0-awsccccc) or in accounts in any OUs that are children of the OU.
For more information on how to construct an entity path, see Understand the Amazon Web Services Organizations entity path in the Amazon Web Services Identity and Access Management User Guide.
" + } + }, + "documentation":"Add an Organizational Unit (OU) exclusion to your IPAM. If your IPAM is integrated with Amazon Web Services Organizations and you add an organizational unit (OU) exclusion, IPAM will not manage the IP addresses in accounts in that OU exclusion. There is a limit on the number of exclusions you can create. For more information, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.
" + }, + "AddIpamOrganizationalUnitExclusionSet":{ + "type":"list", + "member":{"shape":"AddIpamOrganizationalUnitExclusion"}, + "max":10, + "min":0 + }, "AddPrefixListEntries":{ "type":"list", "member":{"shape":"AddPrefixListEntry"}, @@ -9810,7 +9856,7 @@ "members":{ "InternetGatewayBlockMode":{ "shape":"BlockPublicAccessMode", - "documentation":"The mode of VPC BPA.
bidirectional-access-allowed: VPC BPA is not enabled and traffic is allowed to and from internet gateways and egress-only internet gateways in this Region. bidirectional-access-blocked: Block all traffic to and from internet gateways and egress-only internet gateways in this Region (except for excluded VPCs and subnets). ingress-access-blocked: Block all internet traffic to the VPCs in this Region (except for VPCs or subnets which are excluded). Only traffic to and from NAT gateways and egress-only internet gateways is allowed because these gateways only allow outbound connections to be established.
" + "documentation":"The mode of VPC BPA. off: VPC BPA is not enabled and traffic is allowed to and from internet gateways and egress-only internet gateways in this Region. block-bidirectional: Block all traffic to and from internet gateways and egress-only internet gateways in this Region (except for excluded VPCs and subnets). block-ingress: Block all internet traffic to the VPCs in this Region (except for VPCs or subnets which are excluded). Only traffic to and from NAT gateways and egress-only internet gateways is allowed because these gateways only allow outbound connections to be established.
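A hedged sketch of toggling the Region-wide mode with the renamed enum values; the request parameter and the response key are assumptions inferred from the member names in this hunk:

```python
import boto3

ec2 = boto3.client("ec2")

# Block inbound internet traffic Region-wide (except exclusions), then read
# the setting back. block-ingress still allows NAT/egress-only gateway paths.
ec2.modify_vpc_block_public_access_options(
    InternetGatewayBlockMode="block-ingress",
)
options = ec2.describe_vpc_block_public_access_options()
print(options["VpcBlockPublicAccessOptions"]["InternetGatewayBlockMode"])
```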
The reservation ID of the Capacity Block extension.
", + "locationName":"capacityReservationId" + }, + "InstanceType":{ + "shape":"String", + "documentation":"The instance type of the Capacity Block extension.
", + "locationName":"instanceType" + }, + "InstanceCount":{ + "shape":"Integer", + "documentation":"The number of instances in the Capacity Block extension.
", + "locationName":"instanceCount" + }, + "AvailabilityZone":{ + "shape":"AvailabilityZoneName", + "documentation":"The Availability Zone of the Capacity Block extension.
", + "locationName":"availabilityZone" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"The Availability Zone ID of the Capacity Block extension.
", + "locationName":"availabilityZoneId" + }, + "CapacityBlockExtensionOfferingId":{ + "shape":"OfferingId", + "documentation":"The ID of the Capacity Block extension offering.
", + "locationName":"capacityBlockExtensionOfferingId" + }, + "CapacityBlockExtensionDurationHours":{ + "shape":"Integer", + "documentation":"The duration of the Capacity Block extension in hours.
", + "locationName":"capacityBlockExtensionDurationHours" + }, + "CapacityBlockExtensionStatus":{ + "shape":"CapacityBlockExtensionStatus", + "documentation":"The status of the Capacity Block extension. A Capacity Block extension can have one of the following statuses:
payment-pending
- The Capacity Block extension payment is processing. If your payment can't be processed within 12 hours, the Capacity Block extension is failed.
payment-failed
- Payment for the Capacity Block extension request was not successful.
payment-succeeded
- Payment for the Capacity Block extension request was successful. You receive an invoice that reflects the one-time upfront payment. In the invoice, you can associate the paid amount with the Capacity Block reservation ID.
The date when the Capacity Block extension was purchased.
", + "locationName":"capacityBlockExtensionPurchaseDate" + }, + "CapacityBlockExtensionStartDate":{ + "shape":"MillisecondDateTime", + "documentation":"The start date of the Capacity Block extension.
", + "locationName":"capacityBlockExtensionStartDate" + }, + "CapacityBlockExtensionEndDate":{ + "shape":"MillisecondDateTime", + "documentation":"The end date of the Capacity Block extension.
", + "locationName":"capacityBlockExtensionEndDate" + }, + "UpfrontFee":{ + "shape":"String", + "documentation":"The total price to be paid up front.
", + "locationName":"upfrontFee" + }, + "CurrencyCode":{ + "shape":"String", + "documentation":"The currency of the payment for the Capacity Block extension.
", + "locationName":"currencyCode" + } + }, + "documentation":"Describes a Capacity Block extension. With an extension, you can extend the duration of time for an existing Capacity Block.
" + }, + "CapacityBlockExtensionOffering":{ + "type":"structure", + "members":{ + "CapacityBlockExtensionOfferingId":{ + "shape":"OfferingId", + "documentation":"The ID of the Capacity Block extension offering.
", + "locationName":"capacityBlockExtensionOfferingId" + }, + "InstanceType":{ + "shape":"String", + "documentation":"The instance type of the Capacity Block that will be extended.
", + "locationName":"instanceType" + }, + "InstanceCount":{ + "shape":"Integer", + "documentation":"The number of instances in the Capacity Block extension offering.
", + "locationName":"instanceCount" + }, + "AvailabilityZone":{ + "shape":"AvailabilityZoneName", + "documentation":"The Availability Zone of the Capacity Block that will be extended.
", + "locationName":"availabilityZone" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"The Availability Zone ID of the Capacity Block that will be extended.
", + "locationName":"availabilityZoneId" + }, + "StartDate":{ + "shape":"MillisecondDateTime", + "documentation":"The start date of the Capacity Block that will be extended.
", + "locationName":"startDate" + }, + "CapacityBlockExtensionStartDate":{ + "shape":"MillisecondDateTime", + "documentation":"The date and time at which the Capacity Block extension will start. This date is also the same as the end date of the Capacity Block that will be extended.
", + "locationName":"capacityBlockExtensionStartDate" + }, + "CapacityBlockExtensionEndDate":{ + "shape":"MillisecondDateTime", + "documentation":"The date and time at which the Capacity Block extension expires. When a Capacity Block expires, the reserved capacity is released and you can no longer launch instances into it. The Capacity Block's state changes to expired
when it reaches its end date
The amount of time of the Capacity Block extension offering in hours.
", + "locationName":"capacityBlockExtensionDurationHours" + }, + "UpfrontFee":{ + "shape":"String", + "documentation":"The total price of the Capacity Block extension offering, to be paid up front.
", + "locationName":"upfrontFee" + }, + "CurrencyCode":{ + "shape":"String", + "documentation":"The currency of the payment for the Capacity Block extension offering.
", + "locationName":"currencyCode" + }, + "Tenancy":{ + "shape":"CapacityReservationTenancy", + "documentation":"Indicates the tenancy of the Capacity Block extension offering. A Capacity Block can have one of the following tenancy settings:
default
- The Capacity Block is created on hardware that is shared with other Amazon Web Services accounts.
dedicated
- The Capacity Block is created on single-tenant hardware that is dedicated to a single Amazon Web Services account.
The recommended Capacity Block extension that fits your search requirements.
" + }, + "CapacityBlockExtensionOfferingSet":{ + "type":"list", + "member":{ + "shape":"CapacityBlockExtensionOffering", + "locationName":"item" + } + }, + "CapacityBlockExtensionSet":{ + "type":"list", + "member":{ + "shape":"CapacityBlockExtension", + "locationName":"item" + } + }, + "CapacityBlockExtensionStatus":{ + "type":"string", + "enum":[ + "payment-pending", + "payment-failed", + "payment-succeeded" + ] + }, "CapacityBlockOffering":{ "type":"structure", "members":{ @@ -10520,7 +10725,7 @@ }, "CapacityBlockDurationHours":{ "shape":"Integer", - "documentation":"The amount of time of the Capacity Block reservation in hours.
", + "documentation":"The number of hours (in addition to capacityBlockDurationMinutes
) for the duration of the Capacity Block reservation. For example, if a Capacity Block starts at 04:55 and ends at 11:30, the hours field would be 6.
The tenancy of the Capacity Block.
", "locationName":"tenancy" + }, + "CapacityBlockDurationMinutes":{ + "shape":"Integer", + "documentation":"The number of minutes (in addition to capacityBlockDurationHours
) for the duration of the Capacity Block reservation. For example, if a Capacity Block starts at 08:55 and ends at 11:30, the minutes field would be 35.
The recommended Capacity Block that fits your search requirements.
" @@ -10613,7 +10823,7 @@ }, "State":{ "shape":"CapacityReservationState", - "documentation":"The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:
active - The Capacity Reservation is active and the capacity is available for your use. expired - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use. cancelled - The Capacity Reservation was cancelled. The reserved capacity is no longer available for your use. pending - The Capacity Reservation request was successful but the capacity provisioning is still pending. failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.
" + "documentation":"The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:
active - The capacity is available for use.
expired - The Capacity Reservation expired automatically at the date and time specified in your reservation request. The reserved capacity is no longer available for your use.
cancelled - The Capacity Reservation was canceled. The reserved capacity is no longer available for your use.
pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.
failed - The Capacity Reservation request has failed. A request can fail due to request parameters that are not valid, capacity constraints, or instance limit constraints. You can view a failed request for 60 minutes.
scheduled - (Future-dated Capacity Reservations only) The future-dated Capacity Reservation request was approved and the Capacity Reservation is scheduled for delivery on the requested start date.
assessing - (Future-dated Capacity Reservations only) Amazon EC2 is assessing your request for a future-dated Capacity Reservation.
delayed - (Future-dated Capacity Reservations only) Amazon EC2 encountered a delay in provisioning the requested future-dated Capacity Reservation. Amazon EC2 is unable to deliver the requested capacity by the requested start date and time.
unsupported - (Future-dated Capacity Reservations only) Amazon EC2 can't support the future-dated Capacity Reservation request due to capacity constraints. You can view unsupported requests for 30 days. The Capacity Reservation will not be delivered.
The ID of the Amazon Web Services account to which billing of the unused capacity of the Capacity Reservation is assigned.
", "locationName":"unusedReservationBillingOwnerId" + }, + "CommitmentInfo":{ + "shape":"CapacityReservationCommitmentInfo", + "documentation":"Information about your commitment for a future-dated Capacity Reservation.
", + "locationName":"commitmentInfo" + }, + "DeliveryPreference":{ + "shape":"CapacityReservationDeliveryPreference", + "documentation":"The delivery method for a future-dated Capacity Reservation. incremental
indicates that the requested capacity is delivered in addition to any running instances and reserved capacity that you have in your account at the requested date and time.
Describes a Capacity Reservation.
" @@ -10738,6 +10958,34 @@ "expired" ] }, + "CapacityReservationCommitmentDuration":{ + "type":"long", + "max":200000000, + "min":1 + }, + "CapacityReservationCommitmentInfo":{ + "type":"structure", + "members":{ + "CommittedInstanceCount":{ + "shape":"Integer", + "documentation":"The instance capacity that you committed to when you requested the future-dated Capacity Reservation.
", + "locationName":"committedInstanceCount" + }, + "CommitmentEndDate":{ + "shape":"MillisecondDateTime", + "documentation":"The date and time at which the commitment duration expires, in the ISO8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ
). You can't decrease the instance count or cancel the Capacity Reservation before this date and time.
Information about your commitment for a future-dated Capacity Reservation.
" + }, + "CapacityReservationDeliveryPreference":{ + "type":"string", + "enum":[ + "fixed", + "incremental" + ] + }, "CapacityReservationFleet":{ "type":"structure", "members":{ @@ -11012,7 +11260,10 @@ "failed", "scheduled", "payment-pending", - "payment-failed" + "payment-failed", + "assessing", + "delayed", + "unsupported" ] }, "CapacityReservationTarget":{ @@ -12735,7 +12986,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"The instance type for which to reserve capacity. For more information, see Instance types in the Amazon EC2 User Guide.
" + "documentation":"The instance type for which to reserve capacity.
You can request future-dated Capacity Reservations for instance types in the C, M, R, I, and T instance families only.
For more information, see Instance types in the Amazon EC2 User Guide.
" }, "InstancePlatform":{ "shape":"CapacityReservationInstancePlatform", @@ -12755,7 +13006,7 @@ }, "InstanceCount":{ "shape":"Integer", - "documentation":"The number of instances for which to reserve capacity.
Valid range: 1 - 1000
" + "documentation":"The number of instances for which to reserve capacity.
You can request future-dated Capacity Reservations for an instance count with a minimum of 100 vCPUs. For example, if you request a future-dated Capacity Reservation for m5.xlarge instances, you must request at least 25 instances (25 * m5.xlarge = 100 vCPUs).
Valid range: 1 - 1000
" }, "EbsOptimized":{ "shape":"Boolean", @@ -12767,7 +13018,7 @@ }, "EndDate":{ "shape":"DateTime", - "documentation":"The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. The Capacity Reservation's state changes to expired
when it reaches its end date and time. You must provide an EndDate value if EndDateType is limited. Omit EndDate if EndDateType is unlimited. If the EndDateType is limited, the Capacity Reservation is cancelled within an hour from the specified time. For example, if you specify 5/31/2019, 13:30:55, the Capacity Reservation is guaranteed to end between 13:30:55 and 14:30:55 on 5/31/2019.
" + "documentation":"The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. The Capacity Reservation's state changes to expired when it reaches its end date and time. You must provide an EndDate value if EndDateType is limited. Omit EndDate if EndDateType is unlimited. If the EndDateType is limited, the Capacity Reservation is cancelled within an hour from the specified time. For example, if you specify 5/31/2019, 13:30:55, the Capacity Reservation is guaranteed to end between 13:30:55 and 14:30:55 on 5/31/2019. If you are requesting a future-dated Capacity Reservation, you can't specify an end date and time that is within the commitment duration.
" }, "EndDateType":{ "shape":"EndDateType", @@ -12775,7 +13026,7 @@ }, "InstanceMatchCriteria":{ "shape":"InstanceMatchCriteria", - "documentation":"Indicates the type of instance launches that the Capacity Reservation accepts. The options include:
open - The Capacity Reservation automatically matches all instances that have matching attributes (instance type, platform, and Availability Zone). Instances that have matching attributes run in the Capacity Reservation automatically without specifying any additional parameters. targeted - The Capacity Reservation only accepts instances that have matching attributes (instance type, platform, and Availability Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity. Default: open
" + "documentation":"Indicates the type of instance launches that the Capacity Reservation accepts. The options include: open - The Capacity Reservation automatically matches all instances that have matching attributes (instance type, platform, and Availability Zone). Instances that have matching attributes run in the Capacity Reservation automatically without specifying any additional parameters. targeted - The Capacity Reservation only accepts instances that have matching attributes (instance type, platform, and Availability Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity. If you are requesting a future-dated Capacity Reservation, you must specify targeted. Default: open
The Amazon Resource Name (ARN) of the Outpost on which to create the Capacity Reservation.
" + "documentation":"Not supported for future-dated Capacity Reservations.
The Amazon Resource Name (ARN) of the Outpost on which to create the Capacity Reservation.
" }, "PlacementGroupArn":{ "shape":"PlacementGroupArn", - "documentation":"The Amazon Resource Name (ARN) of the cluster placement group in which to create the Capacity Reservation. For more information, see Capacity Reservations for cluster placement groups in the Amazon EC2 User Guide.
" + "documentation":"Not supported for future-dated Capacity Reservations.
The Amazon Resource Name (ARN) of the cluster placement group in which to create the Capacity Reservation. For more information, see Capacity Reservations for cluster placement groups in the Amazon EC2 User Guide.
" + }, + "StartDate":{ + "shape":"MillisecondDateTime", + "documentation":"Required for future-dated Capacity Reservations only. To create a Capacity Reservation for immediate use, omit this parameter.
The date and time at which the future-dated Capacity Reservation should become available for use, in the ISO8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ).
You can request a future-dated Capacity Reservation between 5 and 120 days in advance.
" + }, + "CommitmentDuration":{ + "shape":"CapacityReservationCommitmentDuration", + "documentation":"Required for future-dated Capacity Reservations only. To create a Capacity Reservation for immediate use, omit this parameter.
Specify a commitment duration, in seconds, for the future-dated Capacity Reservation.
The commitment duration is a minimum duration for which you commit to having the future-dated Capacity Reservation in the active state in your account after it has been delivered.
For more information, see Commitment duration.
" + }, + "DeliveryPreference":{ + "shape":"CapacityReservationDeliveryPreference", + "documentation":"Required for future-dated Capacity Reservations only. To create a Capacity Reservation for immediate use, omit this parameter.
Indicates that the requested capacity will be delivered in addition to any running instances or reserved capacity that you have in your account at the requested date and time.
The only supported value is incremental.
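Pulling the new request members together, here is a hedged boto3 sketch of a future-dated request that satisfies the documented constraints: targeted match criteria, a start date 5-120 days out, a commitment duration in seconds, and at least 100 vCPUs (25 m5.xlarge instances). All concrete values are illustrative:

```python
import boto3

ec2 = boto3.client("ec2")

# Future-dated Capacity Reservation sketch; values are placeholders.
ec2.create_capacity_reservation(
    InstanceType="m5.xlarge",
    InstancePlatform="Linux/UNIX",
    AvailabilityZone="us-east-1a",
    InstanceCount=25,                      # 25 x 4 vCPUs = 100 vCPUs minimum
    InstanceMatchCriteria="targeted",      # required for future-dated requests
    StartDate="2025-03-01T09:00:00.000Z",  # 5-120 days in advance
    CommitmentDuration=30 * 24 * 3600,     # 30 days, expressed in seconds
    DeliveryPreference="incremental",      # the only supported value
)
```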
The exclusion mode for internet gateway traffic.
bidirectional-access-allowed: Allow all internet traffic to and from the excluded VPCs and subnets. egress-access-allowed: Allow outbound internet traffic from the excluded VPCs and subnets. Block inbound internet traffic to the excluded VPCs and subnets. Only applies when VPC Block Public Access is set to Bidirectional.
" + "documentation":"The exclusion mode for internet gateway traffic. allow-bidirectional: Allow all internet traffic to and from the excluded VPCs and subnets. allow-egress: Allow outbound internet traffic from the excluded VPCs and subnets. Block inbound internet traffic to the excluded VPCs and subnets. Only applies when VPC Block Public Access is set to Bidirectional.
The IDs of Capacity Block reservations that you want to display the history for.
", + "locationName":"CapacityReservationId" + }, + "NextToken":{ + "shape":"String", + "documentation":"The token to use to retrieve the next page of results.
" + }, + "MaxResults":{ + "shape":"DescribeFutureCapacityMaxResults", + "documentation":"The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"One or more filters
availability-zone - The Availability Zone of the extension.
availability-zone-id - The Availability Zone ID of the extension.
capacity-block-extension-offering-id - The ID of the extension offering.
capacity-block-extension-status - The status of the extension (payment-pending | payment-failed | payment-succeeded).
capacity-reservation-id - The reservation ID of the extension.
instance-type - The instance type of the extension.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation.
Describes one or more of your Capacity Block extensions. The results describe only the Capacity Block extensions in the Amazon Web Services Region that you're currently using.
", + "locationName":"capacityBlockExtensionSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
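The paginators-1.json hunk earlier in this diff registers DescribeCapacityBlockExtensionHistory as pageable over CapacityBlockExtensions, so the usual boto3 paginator pattern applies. The reservation ID and the request key CapacityReservationIds are assumptions consistent with the locationName shown above:

```python
import boto3

ec2 = boto3.client("ec2")

# Page through all extension events for one Capacity Block reservation.
paginator = ec2.get_paginator("describe_capacity_block_extension_history")
for page in paginator.paginate(
    CapacityReservationIds=["cr-0123456789example"],  # placeholder ID
):
    for ext in page["CapacityBlockExtensions"]:
        print(ext["CapacityReservationId"], ext["CapacityBlockExtensionStatus"])
```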
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation.
The duration of the Capacity Block extension offering in hours.
" + }, + "CapacityReservationId":{ + "shape":"CapacityReservationId", + "documentation":"The ID of the Capacity reservation to be extended.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"The token to use to retrieve the next page of results.
" + }, + "MaxResults":{ + "shape":"DescribeCapacityBlockExtensionOfferingsMaxResults", + "documentation":"The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" + } + } + }, + "DescribeCapacityBlockExtensionOfferingsResult":{ + "type":"structure", + "members":{ + "CapacityBlockExtensionOfferings":{ + "shape":"CapacityBlockExtensionOfferingSet", + "documentation":"The recommended Capacity Block extension offerings for the dates specified.
", + "locationName":"capacityBlockExtensionOfferingSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
Filters for the request:
resource-arn - The Amazon Resource Name (ARN) of an exclusion.
internet-gateway-exclusion-mode - The mode of a VPC BPA exclusion. Possible values: bidirectional-access-allowed | egress-access-allowed.
state - The state of VPC BPA. Possible values: create-in-progress | create-complete | update-in-progress | update-complete | delete-in-progress | deleted-complete | disable-in-progress | disable-complete
tag - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
tag-value - The value of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific value, regardless of the tag key.
" + "documentation":"Filters for the request:
resource-arn - The Amazon Resource Name (ARN) of an exclusion.
internet-gateway-exclusion-mode - The mode of a VPC BPA exclusion. Possible values: allow-bidirectional | allow-egress.
state - The state of VPC BPA. Possible values: create-in-progress | create-complete | update-in-progress | update-complete | delete-in-progress | deleted-complete | disable-in-progress | disable-complete
tag - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
tag-value - The value of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific value, regardless of the tag key.
The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:
active - The Capacity Reservation is active and the capacity is available for your use. expired - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use. cancelled - The Capacity Reservation was cancelled. The reserved capacity is no longer available for your use. pending - The Capacity Reservation request was successful but the capacity provisioning is still pending. failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.
" + "documentation":"The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:
active - The capacity is available for use.
expired - The Capacity Reservation expired automatically at the date and time specified in your reservation request. The reserved capacity is no longer available for your use.
cancelled - The Capacity Reservation was canceled. The reserved capacity is no longer available for your use.
pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.
failed - The Capacity Reservation request has failed. A request can fail due to request parameters that are not valid, capacity constraints, or instance limit constraints. You can view a failed request for 60 minutes.
scheduled - (Future-dated Capacity Reservations only) The future-dated Capacity Reservation request was approved and the Capacity Reservation is scheduled for delivery on the requested start date.
assessing - (Future-dated Capacity Reservations only) Amazon EC2 is assessing your request for a future-dated Capacity Reservation.
delayed - (Future-dated Capacity Reservations only) Amazon EC2 encountered a delay in provisioning the requested future-dated Capacity Reservation. Amazon EC2 is unable to deliver the requested capacity by the requested start date and time.
unsupported - (Future-dated Capacity Reservations only) Amazon EC2 can't support the future-dated Capacity Reservation request due to capacity constraints. You can view unsupported requests for 30 days. The Capacity Reservation will not be delivered.
The last successful resource discovery time.
", "locationName":"lastSuccessfulDiscoveryTime" + }, + "OrganizationalUnitId":{ + "shape":"String", + "documentation":"The ID of an Organizational Unit in Amazon Web Services Organizations.
", + "locationName":"organizationalUnitId" } }, "documentation":"An IPAM discovered account. A discovered account is an Amazon Web Services account that is monitored under a resource discovery. If you have integrated IPAM with Amazon Web Services Organizations, all accounts in the organization are discovered accounts.
" @@ -39497,6 +39861,24 @@ "locationName":"item" } }, + "IpamOrganizationalUnitExclusion":{ + "type":"structure", + "members":{ + "OrganizationsEntityPath":{ + "shape":"String", + "documentation":"An Amazon Web Services Organizations entity path. For more information on the entity path, see Understand the Amazon Web Services Organizations entity path in the Amazon Web Services Identity and Access Management User Guide.
", + "locationName":"organizationsEntityPath" + } + }, + "documentation":"If your IPAM is integrated with Amazon Web Services Organizations and you add an organizational unit (OU) exclusion, IPAM will not manage the IP addresses in accounts in that OU exclusion.
" + }, + "IpamOrganizationalUnitExclusionSet":{ + "type":"list", + "member":{ + "shape":"IpamOrganizationalUnitExclusion", + "locationName":"item" + } + }, "IpamOverlapStatus":{ "type":"string", "enum":[ @@ -40105,6 +40487,11 @@ "shape":"TagList", "documentation":"A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value. You can use tags to search and filter your resources or track your Amazon Web Services costs.
", "locationName":"tagSet" + }, + "OrganizationalUnitExclusions":{ + "shape":"IpamOrganizationalUnitExclusionSet", + "documentation":"If your IPAM is integrated with Amazon Web Services Organizations and you add an organizational unit (OU) exclusion, IPAM will not manage the IP addresses in accounts in that OU exclusion.
", + "locationName":"organizationalUnitExclusionSet" } }, "documentation":"A resource discovery is an IPAM component that enables IPAM to manage and monitor resources that belong to the owning account.
" @@ -44477,6 +44864,16 @@ "shape":"RemoveIpamOperatingRegionSet", "documentation":"Remove operating Regions.
", "locationName":"RemoveOperatingRegion" + }, + "AddOrganizationalUnitExclusions":{ + "shape":"AddIpamOrganizationalUnitExclusionSet", + "documentation":"Add an Organizational Unit (OU) exclusion to your IPAM. If your IPAM is integrated with Amazon Web Services Organizations and you add an organizational unit (OU) exclusion, IPAM will not manage the IP addresses in accounts in that OU exclusion. There is a limit on the number of exclusions you can create. For more information, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.
", + "locationName":"AddOrganizationalUnitExclusion" + }, + "RemoveOrganizationalUnitExclusions":{ + "shape":"RemoveIpamOrganizationalUnitExclusionSet", + "documentation":"Remove an Organizational Unit (OU) exclusion to your IPAM. If your IPAM is integrated with Amazon Web Services Organizations and you add an organizational unit (OU) exclusion, IPAM will not manage the IP addresses in accounts in that OU exclusion. There is a limit on the number of exclusions you can create. For more information, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.
", + "locationName":"RemoveOrganizationalUnitExclusion" } } }, @@ -45790,7 +46187,7 @@ }, "InternetGatewayExclusionMode":{ "shape":"InternetGatewayExclusionMode", - "documentation":"The exclusion mode for internet gateway traffic.
bidirectional-access-allowed
: Allow all internet traffic to and from the excluded VPCs and subnets.
egress-access-allowed
: Allow outbound internet traffic from the excluded VPCs and subnets. Block inbound internet traffic to the excluded VPCs and subnets. Only applies when VPC Block Public Access is set to Bidirectional.
The exclusion mode for internet gateway traffic.
allow-bidirectional
: Allow all internet traffic to and from the excluded VPCs and subnets.
allow-egress
: Allow outbound internet traffic from the excluded VPCs and subnets. Block inbound internet traffic to the excluded VPCs and subnets. Only applies when VPC Block Public Access is set to Bidirectional.
The mode of VPC BPA.
bidirectional-access-allowed
: VPC BPA is not enabled and traffic is allowed to and from internet gateways and egress-only internet gateways in this Region.
bidirectional-access-blocked
: Block all traffic to and from internet gateways and egress-only internet gateways in this Region (except for excluded VPCs and subnets).
ingress-access-blocked
: Block all internet traffic to the VPCs in this Region (except for VPCs or subnets which are excluded). Only traffic to and from NAT gateways and egress-only internet gateways is allowed because these gateways only allow outbound connections to be established.
The mode of VPC BPA.
off
: VPC BPA is not enabled and traffic is allowed to and from internet gateways and egress-only internet gateways in this Region.
block-bidirectional
: Block all traffic to and from internet gateways and egress-only internet gateways in this Region (except for excluded VPCs and subnets).
block-ingress
: Block all internet traffic to the VPCs in this Region (except for VPCs or subnets which are excluded). Only traffic to and from NAT gateways and egress-only internet gateways is allowed because these gateways only allow outbound connections to be established.
Describes the result of the purchase.
" }, + "PurchaseCapacityBlockExtensionRequest":{ + "type":"structure", + "required":[ + "CapacityBlockExtensionOfferingId", + "CapacityReservationId" + ], + "members":{ + "CapacityBlockExtensionOfferingId":{ + "shape":"OfferingId", + "documentation":"The ID of the Capacity Block extension offering to purchase.
" + }, + "CapacityReservationId":{ + "shape":"CapacityReservationId", + "documentation":"The ID of the Capacity reservation to be extended.
" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation.
The purchased Capacity Block extensions.
", + "locationName":"capacityBlockExtensionSet" + } + } + }, "PurchaseCapacityBlockRequest":{ "type":"structure", "required":[ @@ -50807,6 +51235,22 @@ "max":50, "min":0 }, + "RemoveIpamOrganizationalUnitExclusion":{ + "type":"structure", + "members":{ + "OrganizationsEntityPath":{ + "shape":"String", + "documentation":"An Amazon Web Services Organizations entity path. Build the path for the OU(s) using Amazon Web Services Organizations IDs separated by a /
. Include all child OUs by ending the path with /*.
Example 1 - Path to a child OU: o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-ghi0-awsccccc/ou-jkl0-awsddddd/ In this example, o-a1b2c3d4e5 is the organization ID, r-f6g7h8i9j0example is the root ID, ou-ghi0-awsccccc is an OU ID, and ou-jkl0-awsddddd is a child OU ID. IPAM will not manage the IP addresses in accounts in the child OU.
Example 2 - Path where all child OUs will be part of the exclusion: o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-ghi0-awsccccc/* In this example, IPAM will not manage the IP addresses in accounts in the OU (ou-ghi0-awsccccc) or in accounts in any OUs that are children of the OU.
For more information on how to construct an entity path, see Understand the Amazon Web Services Organizations entity path in the Amazon Web Services Identity and Access Management User Guide.
" + } + }, + "documentation":"Remove an Organizational Unit (OU) exclusion to your IPAM. If your IPAM is integrated with Amazon Web Services Organizations and you add an organizational unit (OU) exclusion, IPAM will not manage the IP addresses in accounts in that OU exclusion. There is a limit on the number of exclusions you can create. For more information, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.
" + }, + "RemoveIpamOrganizationalUnitExclusionSet":{ + "type":"list", + "member":{"shape":"RemoveIpamOrganizationalUnitExclusion"}, + "max":10, + "min":0 + }, "RemovePrefixListEntries":{ "type":"list", "member":{"shape":"RemovePrefixListEntry"}, @@ -62081,7 +62525,7 @@ }, "InternetGatewayExclusionMode":{ "shape":"InternetGatewayExclusionMode", - "documentation":"The exclusion mode for internet gateway traffic.
bidirectional-access-allowed
: Allow all internet traffic to and from the excluded VPCs and subnets.
egress-access-allowed
: Allow outbound internet traffic from the excluded VPCs and subnets. Block inbound internet traffic to the excluded VPCs and subnets. Only applies when VPC Block Public Access is set to Bidirectional.
The exclusion mode for internet gateway traffic.
allow-bidirectional
: Allow all internet traffic to and from the excluded VPCs and subnets.
allow-egress
: Allow outbound internet traffic from the excluded VPCs and subnets. Block inbound internet traffic to the excluded VPCs and subnets. Only applies when VPC Block Public Access is set to Bidirectional.
The current mode of VPC BPA.
bidirectional-access-allowed
: VPC BPA is not enabled and traffic is allowed to and from internet gateways and egress-only internet gateways in this Region.
bidirectional-access-blocked
: Block all traffic to and from internet gateways and egress-only internet gateways in this Region (except for excluded VPCs and subnets).
ingress-access-blocked
: Block all internet traffic to the VPCs in this Region (except for VPCs or subnets which are excluded). Only traffic to and from NAT gateways and egress-only internet gateways is allowed because these gateways only allow outbound connections to be established.
The current mode of VPC BPA.
off
: VPC BPA is not enabled and traffic is allowed to and from internet gateways and egress-only internet gateways in this Region.
block-bidirectional
: Block all traffic to and from internet gateways and egress-only internet gateways in this Region (except for excluded VPCs and subnets).
block-ingress
: Block all internet traffic to the VPCs in this Region (except for VPCs or subnets which are excluded). Only traffic to and from NAT gateways and egress-only internet gateways is allowed because these gateways only allow outbound connections to be established.
VPC Block public Access (BPA) enables you to block resources in VPCs and subnets that you own in a Region from reaching or being reached from the internet through internet gateways and egress-only internet gateways. To learn more about VPC BPA, see Block public access to VPCs and subnets in the Amazon VPC User Guide.
" + "documentation":"VPC Block Public Access (BPA) enables you to block resources in VPCs and subnets that you own in a Region from reaching or being reached from the internet through internet gateways and egress-only internet gateways. To learn more about VPC BPA, see Block public access to VPCs and subnets in the Amazon VPC User Guide.
" }, "VpcBlockPublicAccessState":{ "type":"string", diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index f09701c9da..d3711ded72 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -42,7 +42,7 @@ {"shape":"TagQuotaPerResourceExceeded"}, {"shape":"InvalidARNFault"} ], - "documentation":"A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track all your ElastiCache resources, with the exception of global replication group. When you add or remove tags on replication groups, those actions will be replicated to all nodes in the replication group. For more information, see Resource-level permissions.
For example, you can apply cost-allocation tags to your ElastiCache resources, and Amazon generates a cost allocation report as a comma-separated value (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories (such as cost centers, application names, or owners) to organize your costs across multiple services.
For more information, see Using Cost Allocation Tags in Amazon ElastiCache in the ElastiCache User Guide.
" + "documentation":"A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track all your ElastiCache resources, with the exception of global replication group. When you add or remove tags on replication groups, those actions will be replicated to all nodes in the replication group. For more information, see Resource-level permissions.
For example, you can apply cost-allocation tags to your ElastiCache resources, and Amazon generates a cost allocation report as a comma-separated value (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories (such as cost centers, application names, or owners) to organize your costs across multiple services.
For more information, see Using Cost Allocation Tags in Amazon ElastiCache in the ElastiCache User Guide.
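A minimal boto3 sketch of the tagging flow described above, using a placeholder ARN:

```python
import boto3

elasticache = boto3.client("elasticache")

# Placeholder ARN for the resource being tagged.
arn = "arn:aws:elasticache:us-west-2:123456789012:cluster:my-cluster"

elasticache.add_tags_to_resource(
    ResourceName=arn,
    Tags=[{"Key": "CostCenter", "Value": "analytics"}],
)

for tag in elasticache.list_tags_for_resource(ResourceName=arn)["TagList"]:
    print(tag["Key"], "=", tag["Value"])
```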
" }, "AuthorizeCacheSecurityGroupIngress":{ "name":"AuthorizeCacheSecurityGroupIngress", @@ -79,7 +79,7 @@ {"shape":"ServiceUpdateNotFoundFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"Apply the service update. For more information on service updates and applying them, see Applying Service Updates.
" + "documentation":"Apply the service update. For more information on service updates and applying them, see Applying Service Updates.
" }, "BatchStopUpdateAction":{ "name":"BatchStopUpdateAction", @@ -96,7 +96,7 @@ {"shape":"ServiceUpdateNotFoundFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"Stop the service update. For more information on service updates and stopping them, see Stopping Service Updates.
" + "documentation":"Stop the service update. For more information on service updates and stopping them, see Stopping Service Updates.
" }, "CompleteMigration":{ "name":"CompleteMigration", @@ -159,7 +159,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InvalidParameterCombinationException"} ], - "documentation":"Makes a copy of an existing snapshot.
This operation is valid for Valkey or Redis OSS only.
Users or groups that have permissions to use the CopySnapshot
operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot
operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.
You could receive the following error messages.
Error Messages
Error Message: The S3 bucket %s is outside of the region.
Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.
Error Message: The S3 bucket %s does not exist.
Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.
Error Message: The S3 bucket %s is not owned by the authenticated user.
Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.
Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.
Solution: Contact your system administrator to get the needed permissions.
Error Message: The S3 bucket %s already contains an object with key %s.
Solution: Give the TargetSnapshotName
a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.
Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.
Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.
Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.
Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
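A minimal copy_snapshot sketch with placeholder names. TargetBucket is only needed when exporting, and the bucket must satisfy the Region and permission requirements listed above:

```python
import boto3

elasticache = boto3.client("elasticache")

elasticache.copy_snapshot(
    SourceSnapshotName="my-snapshot",         # placeholder
    TargetSnapshotName="my-snapshot-copy",    # must be new and unique
    # Only set TargetBucket when exporting; the bucket must be in the
    # same Region and grant ElastiCache the permissions listed above.
    TargetBucket="amzn-s3-demo-bucket",       # placeholder
)
```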
Creates a new Amazon ElastiCache cache parameter group. An ElastiCache cache parameter group is a collection of parameters and their values that are applied to all of the nodes in any cluster or replication group using the CacheParameterGroup.
A newly created CacheParameterGroup is an exact duplicate of the default parameter group for the CacheParameterGroupFamily. To customize the newly created CacheParameterGroup you can change the values of specific parameters. For more information, see:
ModifyCacheParameterGroup in the ElastiCache API Reference.
Parameters and Parameter Groups in the ElastiCache User Guide.
Global Datastore offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore with Valkey or Redis OSS, you can create cross-region read replica clusters for ElastiCache to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.
The GlobalReplicationGroupIdSuffix is the name of the Global datastore.
The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.
Creates a Valkey or Redis OSS (cluster mode disabled) or a Valkey or Redis OSS (cluster mode enabled) replication group.
This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore.
A Valkey or Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.
A Valkey or Redis OSS cluster-mode enabled cluster is composed of 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number of replicas allowed.
The node or shard limit can be increased to a maximum of 500 per cluster if the Valkey or Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include subnets in the subnet group having too small a CIDR range, or subnets that are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster.
To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type.
When a Valkey or Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use scaling. For more information, see Scaling self-designed clusters in the ElastiCache User Guide.
This operation is valid for Valkey and Redis OSS only.
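A minimal create_replication_group sketch for a small cluster-mode-enabled group, with placeholder names:

```python
import boto3

elasticache = boto3.client("elasticache")

# Two shards (node groups) with one read replica each.
elasticache.create_replication_group(
    ReplicationGroupId="demo-rg",                          # placeholder
    ReplicationGroupDescription="demo replication group",
    Engine="valkey",
    CacheNodeType="cache.m7g.large",
    NumNodeGroups=2,
    ReplicasPerNodeGroup=1,
)
```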
For Valkey engine version 7.2 onwards and Redis OSS 6.0 and onwards: Creates a user. For more information, see Using Role Based Access Control (RBAC).
" + "documentation":"For Valkey engine version 7.2 onwards and Redis OSS 6.0 and onwards: Creates a user. For more information, see Using Role Based Access Control (RBAC).
" }, "CreateUserGroup":{ "name":"CreateUserGroup", @@ -423,7 +423,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TagQuotaPerResourceExceeded"} ], - "documentation":"For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Creates a user group. For more information, see Using Role Based Access Control (RBAC)
" + "documentation":"For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Creates a user group. For more information, see Using Role Based Access Control (RBAC)
" }, "DecreaseNodeGroupsInGlobalReplicationGroup":{ "name":"DecreaseNodeGroupsInGlobalReplicationGroup", @@ -654,7 +654,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"DefaultUserAssociatedToUserGroupFault"} ], - "documentation":"For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC).
" + "documentation":"For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC).
" }, "DeleteUserGroup":{ "name":"DeleteUserGroup", @@ -673,7 +673,7 @@ {"shape":"ServiceLinkedRoleNotFoundFault"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC).
" + "documentation":"For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC).
" }, "DescribeCacheClusters":{ "name":"DescribeCacheClusters", @@ -1158,7 +1158,7 @@ {"shape":"InvalidServerlessCacheSnapshotStateFault"}, {"shape":"InvalidARNFault"} ], - "documentation":"Lists all tags currently on a named resource.
A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track all your ElastiCache resources, with the exception of global replication group. When you add or remove tags on replication groups, those actions will be replicated to all nodes in the replication group. For more information, see Resource-level permissions.
If the cluster is not in the available state, ListTagsForResource
returns an error.
Modifies the settings for a replication group. This is limited to Valkey and Redis OSS 7 and above.
Scaling for Valkey or Redis OSS (cluster mode enabled) in the ElastiCache User Guide
ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference
This operation is valid for Valkey or Redis OSS only.
Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes.
" + "documentation":"Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes.
" }, "RebalanceSlotsInGlobalReplicationGroup":{ "name":"RebalanceSlotsInGlobalReplicationGroup", @@ -1420,7 +1420,7 @@ {"shape":"InvalidCacheClusterStateFault"}, {"shape":"CacheClusterNotFoundFault"} ], - "documentation":"Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING.
The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.
When the reboot is complete, a cluster event is created.
Rebooting a cluster is currently supported on Memcached, Valkey and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Valkey or Redis OSS (cluster mode enabled) clusters.
If you make changes to parameters that require a Valkey or Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.
" + "documentation":"Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING.
The reboot causes the contents of the cache (for each cache node being rebooted) to be lost.
When the reboot is complete, a cluster event is created.
Rebooting a cluster is currently supported on Memcached, Valkey and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Valkey or Redis OSS (cluster mode enabled) clusters.
If you make changes to parameters that require a Valkey or Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.
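A minimal reboot_cache_cluster sketch with placeholder identifiers:

```python
import boto3

elasticache = boto3.client("elasticache")

# The listed nodes incur the momentary outage described above.
elasticache.reboot_cache_cluster(
    CacheClusterId="my-cluster",            # placeholder
    CacheNodeIdsToReboot=["0001", "0002"],
)
```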
" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -1451,7 +1451,7 @@ {"shape":"InvalidARNFault"}, {"shape":"TagNotFoundFault"} ], - "documentation":"Removes the tags identified by the TagKeys
list from the named resource. A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track all your ElastiCache resources, with the exception of global replication group. When you add or remove tags on replication groups, those actions will be replicated to all nodes in the replication group. For more information, see Resource-level permissions.
Represents the input of a TestFailover
operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).
This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API.
Note the following
A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period.
If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
If calling this operation multiple times on different shards in the same Valkey or Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.
To determine whether the node replacement is complete, you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:
Replication group message: Test Failover API called for node group <node-group-id>
Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Cache cluster message: Recovering cache nodes <node-id>
Cache cluster message: Finished recovery for cache nodes <node-id>
For more information, see:
Viewing ElastiCache Events in the ElastiCache User Guide
DescribeEvents in the ElastiCache API Reference
Also see Testing Multi-AZ in the ElastiCache User Guide.
" + "documentation":"Represents the input of a TestFailover
operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).
This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API.
Note the following
A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period.
If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
If calling this operation multiple times on different shards in the same Valkey or Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.
To determine whether the node replacement is complete, you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:
Replication group message: Test Failover API called for node group <node-group-id>
Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed
Cache cluster message: Recovering cache nodes <node-id>
Cache cluster message: Finished recovery for cache nodes <node-id>
For more information, see:
Viewing ElastiCache Events in the ElastiCache User Guide
DescribeEvents in the ElastiCache API Reference
Also see Testing Multi-AZ in the ElastiCache User Guide.
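A minimal sketch that triggers a test failover and then reads back the events described above; identifiers are placeholders:

```python
import boto3

elasticache = boto3.client("elasticache")

elasticache.test_failover(
    ReplicationGroupId="demo-rg",  # placeholder
    NodeGroupId="0001",
)

# Check for the failover-related events listed above.
events = elasticache.describe_events(
    SourceType="replication-group",
    SourceIdentifier="demo-rg",
)["Events"]
for event in events:
    print(event["Date"], event["Message"])
```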
" }, "TestMigration":{ "name":"TestMigration", @@ -1805,7 +1805,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The name of the compute and memory capacity node type for the cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large
, cache.m7g.xlarge
, cache.m7g.2xlarge
, cache.m7g.4xlarge
, cache.m7g.8xlarge
, cache.m7g.12xlarge
, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large
, cache.r7g.xlarge
, cache.r7g.2xlarge
, cache.r7g.4xlarge
, cache.r7g.8xlarge
, cache.r7g.12xlarge
, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly
and appendfsync
are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
The customer outpost ARN of the cache node.
" } }, - "documentation":"Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached, Valkey or Redis OSS.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large
, cache.m7g.xlarge
, cache.m7g.2xlarge
, cache.m7g.4xlarge
, cache.m7g.8xlarge
, cache.m7g.12xlarge
, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large
, cache.r7g.xlarge
, cache.r7g.2xlarge
, cache.r7g.4xlarge
, cache.r7g.8xlarge
, cache.r7g.12xlarge
, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly
and appendfsync
are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.
" + "documentation":"Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.
" } }, "documentation":"A parameter that has a different value for each cache node type it is applied to. For example, in a Valkey or Redis OSS cluster, a cache.m1.large
cache node type would have a larger maxmemory
value than a cache.m1.small
type.
The 4-digit id for the node group you are configuring. For Valkey or Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Valkey or Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id.
" + "documentation":"The 4-digit id for the node group you are configuring. For Valkey or Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Valkey or Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id.
" }, "NewReplicaCount":{ "shape":"Integer", @@ -2694,7 +2694,7 @@ }, "TargetBucket":{ "shape":"String", - "documentation":"The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.
When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.
For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.
" + "documentation":"The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access.
When using this parameter to export a snapshot, be sure Amazon ElastiCache has the needed permissions to this S3 bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the Amazon ElastiCache User Guide.
For more information, see Exporting a Snapshot in the Amazon ElastiCache User Guide.
" }, "KmsKeyId":{ "shape":"String", @@ -2743,7 +2743,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large
, cache.m7g.xlarge
, cache.m7g.2xlarge
, cache.m7g.4xlarge
, cache.m7g.8xlarge
, cache.m7g.12xlarge
, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large
, cache.r7g.xlarge
, cache.r7g.2xlarge
, cache.r7g.4xlarge
, cache.r7g.8xlarge
, cache.r7g.12xlarge
, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly
and appendfsync
are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.
" + "documentation":"The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.
" }, "CacheParameterGroupName":{ "shape":"String", @@ -2759,7 +2759,7 @@ }, "CacheSubnetGroupName":{ "shape":"String", - "documentation":"The name of the subnet group to be used for the cluster.
Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups.
The suffix name of a Global datastore. Amazon ElastiCache automatically applies a prefix to the Global datastore ID when it is created. Each Amazon Region has its own prefix. For instance, a Global datastore ID created in the US-West-1 region will begin with \"dsdfu\" along with the suffix name you provide. The suffix, combined with the auto-generated prefix, guarantees uniqueness of the Global datastore name across multiple regions.
For a full list of Amazon Regions and their respective Global datastore ID prefixes, see Using the Amazon CLI with Global datastores.
" + "documentation":"The suffix name of a Global datastore. Amazon ElastiCache automatically applies a prefix to the Global datastore ID when it is created. Each Amazon Region has its own prefix. For instance, a Global datastore ID created in the US-West-1 region will begin with \"dsdfu\" along with the suffix name you provide. The suffix, combined with the auto-generated prefix, guarantees uniqueness of the Global datastore name across multiple regions.
For a full list of Amazon Regions and their respective Global datastore ID prefixes, see Using the Amazon CLI with Global datastores.
" }, "GlobalReplicationGroupDescription":{ "shape":"String", @@ -2996,7 +2996,7 @@ }, "MultiAZEnabled":{ "shape":"BooleanOptional", - "documentation":"A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ.
" + "documentation":"A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ.
" }, "NumCacheClusters":{ "shape":"IntegerOptional", @@ -3020,7 +3020,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
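As a hedged illustration of how one of the node types above is supplied, a minimal boto3 sketch; the identifiers are placeholders and the node type is an arbitrary current-generation choice, not a recommendation::

    import boto3

    elasticache = boto3.client("elasticache")

    # Pick a current-generation general purpose node type from the list above.
    elasticache.create_replication_group(
        ReplicationGroupId="my-replication-group",     # placeholder
        ReplicationGroupDescription="demo group",
        Engine="redis",
        CacheNodeType="cache.m7g.large",
        NumCacheClusters=2,
    )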
The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version in the ElastiCache User Guide), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.
" + "documentation":"The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions
operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.
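Because downgrades are not supported, it can help to check which engine versions are currently offered before pinning one. A minimal sketch (the engine name is an assumption)::

    import boto3

    elasticache = boto3.client("elasticache")

    # List engine versions ElastiCache currently supports for one engine.
    resp = elasticache.describe_cache_engine_versions(Engine="redis")
    for version in resp["CacheEngineVersions"]:
        print(version["Engine"], version["EngineVersion"])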
" }, "CacheParameterGroupName":{ "shape":"String", @@ -3036,7 +3036,7 @@ }, "CacheSubnetGroupName":{ "shape":"String", - "documentation":"The name of the cache subnet group to be used for the replication group.
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups.
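A minimal sketch of creating the subnet group before the cluster that references it; the group name and subnet ID are placeholders::

    import boto3

    elasticache = boto3.client("elasticache")

    # The subnet group must exist before any cluster in the VPC can use it.
    elasticache.create_cache_subnet_group(
        CacheSubnetGroupName="my-subnet-group",               # placeholder
        CacheSubnetGroupDescription="subnets for my cluster",
        SubnetIds=["subnet-0123456789abcdef0"],               # placeholder
    )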
Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.
" + "documentation":"Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.
" }, "NetworkType":{ "shape":"NetworkType", @@ -3925,7 +3925,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
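A minimal sketch of applying this filter; the node type is one value from the list above::

    import boto3

    elasticache = boto3.client("elasticache")

    # Show only reservations that match a single node type.
    resp = elasticache.describe_reserved_cache_nodes(
        CacheNodeType="cache.r7g.large",
    )
    for node in resp["ReservedCacheNodes"]:
        print(node["ReservedCacheNodeId"], node["State"])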
The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
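For the offerings variant, a hedged sketch that filters by node type and then purchases the first matching offering; whether to purchase at all is, of course, situation-specific::

    import boto3

    elasticache = boto3.client("elasticache")

    offerings = elasticache.describe_reserved_cache_nodes_offerings(
        CacheNodeType="cache.r7g.large",
    )["ReservedCacheNodesOfferings"]

    if offerings:
        # The offering ID comes from the describe call above.
        elasticache.purchase_reserved_cache_nodes_offering(
            ReservedCacheNodesOfferingId=offerings[0]["ReservedCacheNodesOfferingId"],
        )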
The requested cache node type is not available in the specified Availability Zone. For more information, see InsufficientCacheClusterCapacity in the ElastiCache User Guide.
", + "documentation":"The requested cache node type is not available in the specified Availability Zone. For more information, see InsufficientCacheClusterCapacity in the ElastiCache User Guide.
", "error":{ "code":"InsufficientCacheClusterCapacity", "httpStatusCode":400, @@ -5135,7 +5135,7 @@ }, "NewAvailabilityZones":{ "shape":"PreferredAvailabilityZoneList", - "documentation":"This option is only supported on Memcached clusters.
The list of Availability Zones where the new Memcached cache nodes are created.
This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request.
Scenarios:
Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes.
Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node.
Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all pending operations.
The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes.
If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached.
Impact of new add/remove requests upon pending requests:
Scenario-1: Pending Action: Delete; New Request: Delete; Result: The new delete, pending or immediate, replaces the pending delete.
Scenario-2: Pending Action: Delete; New Request: Create; Result: The new create, pending or immediate, replaces the pending delete.
Scenario-3: Pending Action: Create; New Request: Delete; Result: The new delete, pending or immediate, replaces the pending create.
Scenario-4: Pending Action: Create; New Request: Create; Result: The new create is added to the pending create.
Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending.
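A minimal sketch of Scenario 1: three active nodes, two more requested, one Availability Zone per new node. The cluster ID and zones are placeholders::

    import boto3

    elasticache = boto3.client("elasticache")

    # 3 active nodes + 2 new nodes => NumCacheNodes=5, one AZ per added node.
    elasticache.modify_cache_cluster(
        CacheClusterId="my-memcached-cluster",   # placeholder
        NumCacheNodes=5,
        NewAvailabilityZones=["us-east-1a", "us-east-1b"],
        ApplyImmediately=True,
    )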
The upgraded version of the cache engine to be run on the cache nodes.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.
" + "documentation":"The upgraded version of the cache engine to be run on the cache nodes.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.
" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -5195,7 +5195,7 @@ }, "AuthTokenUpdateStrategy":{ "shape":"AuthTokenUpdateStrategyType", - "documentation":"Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
SET - allowed only after ROTATE
DELETE - allowed only when transitioning to RBAC
For more information, see Authenticating Users with AUTH
" + "documentation":"Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
SET - allowed only after ROTATE
DELETE - allowed only when transitioning to RBAC
For more information, see Authenticating Users with AUTH
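A hedged sketch of the rotation flow these values describe, on ModifyCacheCluster (ModifyReplicationGroup accepts the same pair of parameters); the cluster ID and token are placeholders::

    import boto3

    elasticache = boto3.client("elasticache")

    # Step 1: ROTATE makes both the previous and the new token accepted.
    elasticache.modify_cache_cluster(
        CacheClusterId="my-cluster",               # placeholder
        AuthToken="new-strong-token-1234567890",   # placeholder
        AuthTokenUpdateStrategy="ROTATE",
        ApplyImmediately=True,
    )

    # Step 2: SET (allowed only after ROTATE) retires the previous token.
    elasticache.modify_cache_cluster(
        CacheClusterId="my-cluster",
        AuthToken="new-strong-token-1234567890",
        AuthTokenUpdateStrategy="SET",
        ApplyImmediately=True,
    )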
" }, "LogDeliveryConfigurations":{ "shape":"LogDeliveryConfigurationRequestList", @@ -5371,7 +5371,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"The upgraded version of the cache engine to be run on the clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
" + "documentation":"The upgraded version of the cache engine to be run on the clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -5395,7 +5395,7 @@ }, "AuthTokenUpdateStrategy":{ "shape":"AuthTokenUpdateStrategyType", - "documentation":"Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
SET - allowed only after ROTATE
DELETE - allowed only when transitioning to RBAC
For more information, see Authenticating Users with AUTH
" + "documentation":"Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token
parameter. Possible values:
ROTATE - default, if no update strategy is provided
SET - allowed only after ROTATE
DELETE - allowed only when transitioning to RBAC
For more information, see Authenticating Users with AUTH
" }, "UserGroupIdsToAdd":{ "shape":"UserGroupIdList", @@ -5549,6 +5549,10 @@ "UserIdsToRemove":{ "shape":"UserIdListInput", "documentation":"The list of user IDs to remove from the user group.
" + }, + "Engine":{ + "shape":"EngineType", + "documentation":"The engine for a user group.
" } } }, @@ -5579,6 +5583,10 @@ "AuthenticationMode":{ "shape":"AuthenticationMode", "documentation":"Specifies how to authenticate the user.
" + }, + "Engine":{ + "shape":"EngineType", + "documentation":"The engine for a specific user.
" } } }, @@ -5989,7 +5997,7 @@ }, "ChangeType":{ "shape":"ChangeType", - "documentation":"Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.
" + "documentation":"Indicates whether a change to the parameter is applied immediately or requires a reboot for the change to be applied. You can force a reboot or wait until the next maintenance window's reboot. For more information, see Rebooting a Cluster.
" } }, "documentation":"Describes an individual setting that controls some aspect of ElastiCache behavior.
" @@ -6340,7 +6348,7 @@ }, "MultiAZ":{ "shape":"MultiAZStatus", - "documentation":"A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ
" + "documentation":"A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ
" }, "ConfigurationEndpoint":{ "shape":"Endpoint", @@ -6404,7 +6412,7 @@ }, "DataTiering":{ "shape":"DataTieringStatus", - "documentation":"Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.
" + "documentation":"Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.
" }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", @@ -6570,7 +6578,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The cache node type for the reserved cache nodes.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
The cache node type for the reserved cache node.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
The recommended date to apply the service update to ensure compliance. For information on compliance, see Self-Service Security Updates for Compliance.
" + "documentation":"The recommendend date to apply the service update in order to ensure compliance. For information on compliance, see Self-Service Security Updates for Compliance.
" }, "ServiceUpdateStatus":{ "shape":"ServiceUpdateStatus", @@ -7231,7 +7239,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"The name of the compute and memory capacity node type for the source cluster.
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
General purpose:
Current generation:
M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
For region availability, see Supported Node Types
M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge
Memory optimized:
Current generation:
R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
For region availability, see Supported Node Types
R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge
Additional node type info
All current generation instance types are created in Amazon VPC by default.
Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.
" + "documentation":"Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.
" } }, "documentation":"Represents a copy of an entire Valkey or Redis OSS cluster as of the time when the snapshot was taken.
", @@ -7703,7 +7711,7 @@ }, "ServiceUpdateRecommendedApplyByDate":{ "shape":"TStamp", - "documentation":"The recommended date to apply the service update to ensure compliance. For information on compliance, see Self-Service Security Updates for Compliance.
" + "documentation":"The recommended date to apply the service update to ensure compliance. For information on compliance, see Self-Service Security Updates for Compliance.
" }, "ServiceUpdateType":{ "shape":"ServiceUpdateType", diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index c83e92397d..98c1bb6874 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -2851,7 +2851,7 @@ "members":{ "Key":{ "shape":"ListenerAttributeKey", - "documentation":"The name of the attribute.
The following attribute is supported by Network Load Balancers and Gateway Load Balancers.
tcp.idle_timeout.seconds - The TCP idle timeout value, in seconds. The valid range is 60-6000 seconds. The default is 350 seconds.
The name of the attribute.
The following attribute is supported by Network Load Balancers and Gateway Load Balancers.
tcp.idle_timeout.seconds - The TCP idle timeout value, in seconds. The valid range is 60-6000 seconds. The default is 350 seconds.
The following attributes are only supported by Application Load Balancers.
routing.http.request.x_amzn_mtls_clientcert_serial_number.header_name - Enables you to modify the header name of the X-Amzn-Mtls-Clientcert-Serial-Number HTTP request header.
routing.http.request.x_amzn_mtls_clientcert_issuer.header_name - Enables you to modify the header name of the X-Amzn-Mtls-Clientcert-Issuer HTTP request header.
routing.http.request.x_amzn_mtls_clientcert_subject.header_name - Enables you to modify the header name of the X-Amzn-Mtls-Clientcert-Subject HTTP request header.
routing.http.request.x_amzn_mtls_clientcert_validity.header_name - Enables you to modify the header name of the X-Amzn-Mtls-Clientcert-Validity HTTP request header.
routing.http.request.x_amzn_mtls_clientcert_leaf.header_name - Enables you to modify the header name of the X-Amzn-Mtls-Clientcert-Leaf HTTP request header.
routing.http.request.x_amzn_mtls_clientcert.header_name - Enables you to modify the header name of the X-Amzn-Mtls-Clientcert HTTP request header.
routing.http.request.x_amzn_tls_version.header_name - Enables you to modify the header name of the X-Amzn-Tls-Version HTTP request header.
routing.http.request.x_amzn_tls_cipher_suite.header_name - Enables you to modify the header name of the X-Amzn-Tls-Cipher-Suite HTTP request header.
routing.http.response.server.enabled - Enables you to allow or remove the HTTP response server header.
routing.http.response.strict_transport_security.header_value - Informs browsers that the site should only be accessed using HTTPS, and that any future attempts to access it using HTTP should automatically be converted to HTTPS.
routing.http.response.access_control_allow_origin.header_value - Specifies which origins are allowed to access the server.
routing.http.response.access_control_allow_methods.header_value - Returns which HTTP methods are allowed when accessing the server from a different origin.
routing.http.response.access_control_allow_headers.header_value - Specifies which headers can be used during the request.
routing.http.response.access_control_allow_credentials.header_value - Indicates whether the browser should include credentials such as cookies or authentication when making requests.
routing.http.response.access_control_expose_headers.header_value - Returns which headers the browser can expose to the requesting client.
routing.http.response.access_control_max_age.header_value - Specifies how long the results of a preflight request can be cached, in seconds.
routing.http.response.content_security_policy.header_value - Specifies restrictions enforced by the browser to help minimize the risk of certain types of security threats.
routing.http.response.x_content_type_options.header_value - Indicates whether the MIME types advertised in the Content-Type headers should be followed and not be changed.
routing.http.response.x_frame_options.header_value - Indicates whether the browser is allowed to render a page in a frame, iframe, embed or object.
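A hedged sketch of setting one of the listener attributes above via boto3; the listener ARN is a placeholder and the renamed header value is an arbitrary example::

    import boto3

    elbv2 = boto3.client("elbv2")

    elbv2.modify_listener_attributes(
        ListenerArn="arn:aws:elasticloadbalancing:region:acct:listener/app/my-alb/123/456",  # placeholder
        Attributes=[
            # ALB only: rename the mTLS serial-number request header.
            {
                "Key": "routing.http.request.x_amzn_mtls_clientcert_serial_number.header_name",
                "Value": "X-Client-Cert-Serial",   # arbitrary example name
            },
            # On NLB/GWLB listeners the analogous call would set, e.g.,
            # {"Key": "tcp.idle_timeout.seconds", "Value": "120"}.
        ],
    )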
The name of the attribute.
The following attributes are supported by all load balancers:
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true or false. The default is false.
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and can't be changed.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true or false. The default is false.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
ipv6.deny_all_igw_traffic
- Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
client_keep_alive.seconds
- The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.
connection_logs.s3.enabled
- Indicates whether connection logs are enabled. The value is true or false. The default is false.
connection_logs.s3.bucket
- The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
connection_logs.s3.prefix
- The prefix for the location in the S3 bucket for the connection logs.
routing.http.desync_mitigation_mode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.
routing.http.preserve_host_header.enabled
- Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false.
routing.http.x_amzn_tls_version_and_cipher_suite.enabled
- Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false.
routing.http.xff_client_port.enabled
- Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false.
routing.http.xff_header_processing.mode
- Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append.
If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets.
If the value is preserve, the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change.
If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
waf.fail_open.enabled
- Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false.
The following attributes are supported by only Network Load Balancers:
dns_record.client_routing_policy
- Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity with 100 percent zonal affinity, partial_availability_zone_affinity with 85 percent zonal affinity, and any_availability_zone with 0 percent zonal affinity.
zonal_shift.config.enabled
- Indicates whether zonal shift is enabled. The possible values are true and false. The default is false.
The name of the attribute.
The following attributes are supported by all load balancers:
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true or false. The default is false.
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and can't be changed.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true or false. The default is false.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
ipv6.deny_all_igw_traffic
- Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.
zonal_shift.config.enabled
- Indicates whether zonal shift is enabled. The possible values are true and false. The default is false.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
client_keep_alive.seconds
- The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.
connection_logs.s3.enabled
- Indicates whether connection logs are enabled. The value is true or false. The default is false.
connection_logs.s3.bucket
- The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
connection_logs.s3.prefix
- The prefix for the location in the S3 bucket for the connection logs.
routing.http.desync_mitigation_mode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false.
routing.http.preserve_host_header.enabled
- Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false.
routing.http.x_amzn_tls_version_and_cipher_suite.enabled
- Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false.
routing.http.xff_client_port.enabled
- Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false.
routing.http.xff_header_processing.mode
- Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append.
If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets.
If the value is preserve, the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change.
If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
waf.fail_open.enabled
- Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false.
The following attributes are supported by only Network Load Balancers:
dns_record.client_routing_policy
- Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity with 100 percent zonal affinity, partial_availability_zone_affinity with 85 percent zonal affinity, and any_availability_zone with 0 percent zonal affinity.
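As a sketch, enabling the newly added zonal shift attribute on a cross-zone Application Load Balancer and reading the attributes back; the ARN is a placeholder.

import boto3

elbv2 = boto3.client("elbv2")
alb_arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/1234567890abcdef"  # placeholder

elbv2.modify_load_balancer_attributes(
    LoadBalancerArn=alb_arn,
    Attributes=[{"Key": "zonal_shift.config.enabled", "Value": "true"}],
)

# Verify the change; attribute values are always returned as strings.
attrs = elbv2.describe_load_balancer_attributes(LoadBalancerArn=alb_arn)
print({a["Key"]: a["Value"] for a in attrs["Attributes"]})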
Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.
At least one event ARN is required.
This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.
This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.
Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Services service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.
At least one event ARN is required.
This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.
This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.
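A minimal sketch of the pagination pattern described here, following nextToken by hand; the event ARN is a placeholder, and the Health API is served from the us-east-1 endpoint.

import boto3

health = boto3.client("health", region_name="us-east-1")
entities, token = [], None
while True:
    kwargs = {"filter": {"eventArns": ["arn:aws:health:us-east-1::event/EC2/EXAMPLE/example-event-id"]}}  # placeholder ARN
    if token:
        kwargs["nextToken"] = token
    page = health.describe_affected_entities(**kwargs)
    entities.extend(page["entities"])
    token = page.get("nextToken")
    if not token:
        break  # no more pages
print(len(entities), "affected entities")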
Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Service.
At least one event Amazon Resource Name (ARN) and account ID are required.
Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.
This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.
This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.
Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Services service.
At least one event Amazon Resource Name (ARN) and account ID are required.
Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.
This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.
This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.
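A sketch of the organizational variant, assuming organizational view has been enabled once from the management account; the account ID and event ARN are placeholders.

import boto3

health = boto3.client("health", region_name="us-east-1")

# One-time setup, run from the organization's management account.
health.enable_health_service_access_for_organization()

resp = health.describe_affected_entities_for_organization(
    organizationEntityFilters=[
        {
            "eventArn": "arn:aws:health:us-east-1::event/EC2/EXAMPLE/example-event-id",  # placeholder
            "awsAccountId": "111122223333",  # placeholder
        }
    ]
)
for entity in resp["entities"]:
    print(entity["entityArn"], entity.get("statusCode"))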
Returns the event types that meet the specified filter criteria. You can use this API operation to find information about the Health event, such as the category, Amazon Web Service, and event code. The metadata for each event appears in the EventType object.
If you don't specify filter criteria, the API operation returns all event types, in no particular order.
This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.
Returns the event types that meet the specified filter criteria. You can use this API operation to find information about the Health event, such as the category, Amazon Web Services service, and event code. The metadata for each event appears in the EventType object.
If you don't specify filter criteria, the API operation returns all event types, in no particular order.
This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.
A map of entity tags attached to the affected entity.
Currently, the tags property isn't supported.
Additional metadata about the affected entity.
" } }, "documentation":"Information about an entity that is affected by a Health event.
" @@ -311,7 +317,7 @@ }, "eventScopeCode":{ "shape":"eventScopeCode", - "documentation":"This parameter specifies if the Health event is a public Amazon Web Service event or an account-specific event.
If the eventScopeCode value is PUBLIC, then the affectedAccounts value is always empty.
If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected Amazon Web Services accounts in your organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you have Amazon Web Services accounts that use that service, those account IDs appear in the response.
If the eventScopeCode value is NONE, then the eventArn that you specified in the request is invalid or doesn't exist.
This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.
If the eventScopeCode value is PUBLIC, then the affectedAccounts value is always empty.
If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected Amazon Web Services accounts in your organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you have Amazon Web Services accounts that use that service, those account IDs appear in the response.
If the eventScopeCode value is NONE, then the eventArn that you specified in the request is invalid or doesn't exist.
The Amazon Web Service that is affected by the event. For example, EC2, RDS.
The Amazon Web Services service that is affected by the event. For example, EC2, RDS.
This parameter specifies if the Health event is a public Amazon Web Service event or an account-specific event.
If the eventScopeCode value is PUBLIC, then the affectedAccounts value is always empty.
If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected Amazon Web Services accounts in your organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you have Amazon Web Services accounts that use that service, those account IDs appear in the response.
If the eventScopeCode value is NONE, then the eventArn that you specified in the request is invalid or doesn't exist.
This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.
If the eventScopeCode value is PUBLIC, then the affectedAccounts value is always empty.
If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected Amazon Web Services accounts in your organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you have Amazon Web Services accounts that use that service, those account IDs appear in the response.
If the eventScopeCode value is NONE, then the eventArn that you specified in the request is invalid or doesn't exist.
Summary information about a Health event.
Health events can be public or account-specific:
Public events might be service events that are not specific to an Amazon Web Services account. For example, if there is an issue with an Amazon Web Services Region, Health provides information about the event, even if you don't use services or resources in that Region.
Account-specific events are specific to either your Amazon Web Services account or an account in your organization. For example, if there's an issue with Amazon Elastic Compute Cloud in a Region that you use, Health provides information about the event and the affected resources in the account.
You can determine if an event is public or account-specific by using the eventScopeCode parameter. For more information, see eventScopeCode.
The Amazon Web Services associated with the event. For example, EC2, RDS.
The Amazon Web Services services associated with the event. For example, EC2, RDS.
The Amazon Web Service that is affected by the event. For example, EC2, RDS.
The Amazon Web Services service that is affected by the event. For example, EC2, RDS.
The Amazon Web Services associated with the event. For example, EC2, RDS.
The Amazon Web Services services associated with the event. For example, EC2, RDS.
The Amazon Web Service that is affected by the event, such as EC2 and RDS.
" + "documentation":"The Amazon Web Services service that is affected by the event, such as EC2 and RDS.
" }, "eventTypeCode":{ "shape":"eventTypeCode", @@ -1079,7 +1085,7 @@ }, "eventScopeCode":{ "shape":"eventScopeCode", - "documentation":"This parameter specifies if the Health event is a public Amazon Web Service event or an account-specific event.
If the eventScopeCode value is PUBLIC, then the affectedAccounts value is always empty.
If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected Amazon Web Services accounts in your organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you have Amazon Web Services accounts that use that service, those account IDs appear in the response.
If the eventScopeCode value is NONE, then the eventArn that you specified in the request is invalid or doesn't exist.
This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.
If the eventScopeCode value is PUBLIC, then the affectedAccounts value is always empty.
If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected Amazon Web Services accounts in your organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you have Amazon Web Services accounts that use that service, those account IDs appear in the response.
If the eventScopeCode value is NONE, then the eventArn that you specified in the request is invalid or doesn't exist.
The Amazon Web Services associated with the event. For example, EC2, RDS.
The Amazon Web Services services associated with the event. For example, EC2, RDS.
The Health API provides access to the Health information that appears in the Health Dashboard. You can use the API operations to get information about events that might affect your Amazon Web Services and resources.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan from Amazon Web Services Support to use the Health API. If you call the Health API from an Amazon Web Services account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, you receive a SubscriptionRequiredException error.
For API access, you need an access key ID and a secret access key. Use temporary credentials instead of long-term access keys when possible. Temporary credentials include an access key ID, a secret access key, and a security token that indicates when the credentials expire. For more information, see Best practices for managing Amazon Web Services access keys in the Amazon Web Services General Reference.
You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) to call the Health API operations. Health supports a multi-Region application architecture and has two regional endpoints in an active-passive configuration. You can use the high availability endpoint example to determine which Amazon Web Services Region is active, so that you can get the latest information from the API. For more information, see Accessing the Health API in the Health User Guide.
For authentication of requests, Health uses the Signature Version 4 Signing Process.
If your Amazon Web Services account is part of Organizations, you can use the Health organizational view feature. This feature provides a centralized view of Health events across all accounts in your organization. You can aggregate Health events in real time to identify accounts in your organization that are affected by an operational event or get notified of security vulnerabilities. Use the organizational view API operations to enable this feature and return event information. For more information, see Aggregating Health events in the Health User Guide.
When you use the Health API operations to return Health events, see the following recommendations:
Use the eventScopeCode parameter to specify whether to return Health events that are public or account-specific.
Use pagination to view all events from the response. For example, if you call the DescribeEventsForOrganization operation to get all events in your organization, you might receive several page results. Specify the nextToken in the next request to return more results.
The Health API provides access to the Health information that appears in the Health Dashboard. You can use the API operations to get information about events that might affect your Amazon Web Services services and resources.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan from Amazon Web Services Support to use the Health API. If you call the Health API from an Amazon Web Services account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, you receive a SubscriptionRequiredException error.
For API access, you need an access key ID and a secret access key. Use temporary credentials instead of long-term access keys when possible. Temporary credentials include an access key ID, a secret access key, and a security token that indicates when the credentials expire. For more information, see Best practices for managing Amazon Web Services access keys in the Amazon Web Services General Reference.
You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) to call the Health API operations. Health supports a multi-Region application architecture and has two regional endpoints in an active-passive configuration. You can use the high availability endpoint example to determine which Amazon Web Services Region is active, so that you can get the latest information from the API. For more information, see Accessing the Health API in the Health User Guide.
For authentication of requests, Health uses the Signature Version 4 Signing Process.
If your Amazon Web Services account is part of Organizations, you can use the Health organizational view feature. This feature provides a centralized view of Health events across all accounts in your organization. You can aggregate Health events in real time to identify accounts in your organization that are affected by an operational event or get notified of security vulnerabilities. Use the organizational view API operations to enable this feature and return event information. For more information, see Aggregating Health events in the Health User Guide.
When you use the Health API operations to return Health events, see the following recommendations:
Use the eventScopeCode parameter to specify whether to return Health events that are public or account-specific.
Use pagination to view all events from the response. For example, if you call the DescribeEventsForOrganization operation to get all events in your organization, you might receive several page results. Specify the nextToken in the next request to return more results.
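A short sketch tying these recommendations together: the client is created against the active regional endpoint in us-east-1, pagination is handled by a paginator, and the missing-support-plan error is surfaced explicitly.

import boto3
from botocore.exceptions import ClientError

health = boto3.client("health", region_name="us-east-1")
try:
    for page in health.get_paginator("describe_events").paginate():
        for event in page["events"]:
            print(event["arn"], event.get("eventScopeCode"))
except ClientError as err:
    if err.response["Error"]["Code"] == "SubscriptionRequiredException":
        print("This account has no Business, Enterprise On-Ramp, or Enterprise Support plan.")
    else:
        raise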
Gets details of a job execution.
" + "documentation":"Gets details of a job execution.
Requires permission to access the DescribeJobExecution action.
" }, "GetPendingJobExecutions":{ "name":"GetPendingJobExecutions", @@ -44,7 +46,25 @@ {"shape":"ServiceUnavailableException"}, {"shape":"CertificateValidationException"} ], - "documentation":"Gets the list of all jobs for a thing that are not in a terminal status.
" + "documentation":"Gets the list of all jobs for a thing that are not in a terminal status.
Requires permission to access the GetPendingJobExecutions action.
" + }, + "StartCommandExecution":{ + "name":"StartCommandExecution", + "http":{ + "method":"POST", + "requestUri":"/command-executions" + }, + "input":{"shape":"StartCommandExecutionRequest"}, + "output":{"shape":"StartCommandExecutionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Using the command created with the CreateCommand
API, start a command execution on a specific device.
Gets and starts the next pending (status IN_PROGRESS or QUEUED) job execution for a thing.
" + "documentation":"Gets and starts the next pending (status IN_PROGRESS or QUEUED) job execution for a thing.
Requires permission to access the StartNextPendingJobExecution action.
" }, "UpdateJobExecution":{ "name":"UpdateJobExecution", @@ -79,12 +99,17 @@ {"shape":"CertificateValidationException"}, {"shape":"InvalidStateTransitionException"} ], - "documentation":"Updates the status of a job execution.
" + "documentation":"Updates the status of a job execution.
Requires permission to access the UpdateJobExecution action.
" } }, "shapes":{ "ApproximateSecondsBeforeTimedOut":{"type":"long"}, "BinaryBlob":{"type":"blob"}, + "BinaryParameterValue":{ + "type":"blob", + "min":1 + }, + "BooleanParameterValue":{"type":"boolean"}, "CertificateValidationException":{ "type":"structure", "members":{ @@ -97,6 +122,82 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ClientRequestTokenV2":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\x21-\\x7E]+$" + }, + "CommandArn":{"type":"string"}, + "CommandExecutionId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "CommandExecutionParameterMap":{ + "type":"map", + "key":{"shape":"CommandParameterName"}, + "value":{"shape":"CommandParameterValue"}, + "min":1 + }, + "CommandExecutionTimeoutInSeconds":{ + "type":"long", + "min":1 + }, + "CommandParameterName":{ + "type":"string", + "max":192, + "min":1, + "pattern":"^[.$a-zA-Z0-9_-]+$" + }, + "CommandParameterValue":{ + "type":"structure", + "members":{ + "S":{ + "shape":"StringParameterValue", + "documentation":"An attribute of type String. For example:
\"S\": \"Hello\"
An attribute of type Boolean. For example: \"BOOL\": true
An attribute of type Integer (Thirty-Two Bits).
" + }, + "L":{ + "shape":"LongParameterValue", + "documentation":"An attribute of type Long.
" + }, + "D":{ + "shape":"DoubleParameterValue", + "documentation":"An attribute of type Double (Sixty-Four Bits).
" + }, + "BIN":{ + "shape":"BinaryParameterValue", + "documentation":"An attribute of type Binary.
" + }, + "UL":{ + "shape":"UnsignedLongParameterValue", + "documentation":"An attribute of type Unsigned Long.
" + } + }, + "documentation":"The list of values used to describe a specific command parameter.
" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"}, + "resourceId":{ + "shape":"resourceId", + "documentation":"A conflict occurred while performing the API request on the resource ID.
" + } + }, + "documentation":"A conflict has occurred when performing the API request.
", + "error":{"httpStatusCode":409}, + "exception":true + }, "DescribeJobExecutionJobId":{ "type":"string", "pattern":"[a-zA-Z0-9_-]+|^\\$next" @@ -122,7 +223,7 @@ }, "includeJobDocument":{ "shape":"IncludeJobDocument", - "documentation":"Optional. When set to true, the response contains the job document. The default is false.
", + "documentation":"Optional. Unless set to false, the response contains the job document. The default is true.
", "location":"querystring", "locationName":"includeJobDocument" }, @@ -156,10 +257,10 @@ }, "DetailsValue":{ "type":"string", - "max":1024, "min":1, - "pattern":"[^\\p{C}]*+" + "pattern":"[^\\p{C}]+" }, + "DoubleParameterValue":{"type":"double"}, "ExecutionNumber":{"type":"long"}, "ExpectedVersion":{"type":"long"}, "GetPendingJobExecutionsRequest":{ @@ -189,6 +290,17 @@ }, "IncludeExecutionState":{"type":"boolean"}, "IncludeJobDocument":{"type":"boolean"}, + "IntegerParameterValue":{"type":"integer"}, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"An internal server error occurred when performing the API request.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, "InvalidRequestException":{ "type":"structure", "members":{ @@ -197,7 +309,7 @@ "documentation":"The message for the exception.
" } }, - "documentation":"The contents of the request were invalid. For example, this code is returned when an UpdateJobExecution request contains invalid status details. The message contains details about the error.
", + "documentation":"The contents of the request were invalid.
", "error":{"httpStatusCode":400}, "exception":true }, @@ -227,27 +339,27 @@ }, "status":{ "shape":"JobExecutionStatus", - "documentation":"The status of the job execution. Can be one of: \"QUEUED\", \"IN_PROGRESS\", \"FAILED\", \"SUCCESS\", \"CANCELED\", \"REJECTED\", or \"REMOVED\".
" + "documentation":"The status of the job execution. Can be one of: \"QUEUED\", \"IN_PROGRESS\", \"FAILED\", \"SUCCESS\", \"CANCELED\", \"TIMED_OUT\", \"REJECTED\", or \"REMOVED\".
" }, "statusDetails":{ "shape":"DetailsMap", - "documentation":"A collection of name/value pairs that describe the status of the job execution.
" + "documentation":"A collection of name/value pairs that describe the status of the job execution.
The maximum length of the value in the name/value pair is 1,024 characters.
" }, "queuedAt":{ "shape":"QueuedAt", - "documentation":"The time, in milliseconds since the epoch, when the job execution was enqueued.
" + "documentation":"The time, in seconds since the epoch, when the job execution was enqueued.
" }, "startedAt":{ "shape":"StartedAt", - "documentation":"The time, in milliseconds since the epoch, when the job execution was started.
" + "documentation":"The time, in seconds since the epoch, when the job execution was started.
" }, "lastUpdatedAt":{ "shape":"LastUpdatedAt", - "documentation":"The time, in milliseconds since the epoch, when the job execution was last updated.
" + "documentation":"The time, in seconds since the epoch, when the job execution was last updated.
" }, "approximateSecondsBeforeTimedOut":{ "shape":"ApproximateSecondsBeforeTimedOut", - "documentation":"The estimated number of seconds that remain before the job execution status will be changed to TIMED_OUT
.
The estimated number of seconds that remain before the job execution status will be changed to TIMED_OUT
. The actual job execution timeout can occur up to 60 seconds later than the estimated duration.
The status of the job execution. Can be one of: \"QUEUED\", \"IN_PROGRESS\", \"FAILED\", \"SUCCESS\", \"CANCELED\", \"REJECTED\", or \"REMOVED\".
" + "documentation":"The status of the job execution. Can be one of: \"QUEUED\", \"IN_PROGRESS\", \"FAILED\", \"SUCCESS\", \"CANCELED\", \"TIMED_OUT\", \"REJECTED\", or \"REMOVED\".
" }, "statusDetails":{ "shape":"DetailsMap", - "documentation":"A collection of name/value pairs that describe the status of the job execution.
" + "documentation":"A collection of name/value pairs that describe the status of the job execution.
The maximum length of the value in the name/value pair is 1,024 characters.
" }, "versionNumber":{ "shape":"VersionNumber", @@ -304,19 +416,19 @@ }, "queuedAt":{ "shape":"QueuedAt", - "documentation":"The time, in milliseconds since the epoch, when the job execution was enqueued.
" + "documentation":"The time, in seconds since the epoch, when the job execution was enqueued.
" }, "startedAt":{ "shape":"StartedAt", - "documentation":"The time, in milliseconds since the epoch, when the job execution started.
" + "documentation":"The time, in seconds since the epoch, when the job execution started.
" }, "lastUpdatedAt":{ "shape":"LastUpdatedAt", - "documentation":"The time, in milliseconds since the epoch, when the job execution was last updated.
" + "documentation":"The time, in seconds since the epoch, when the job execution was last updated.
" }, "versionNumber":{ "shape":"VersionNumber", - "documentation":"The version of the job execution. Job execution versions are incremented each time AWS IoT Jobs receives an update from a device.
" + "documentation":"The version of the job execution. Job execution versions are incremented each time IoT Jobs receives an update from a device.
" }, "executionNumber":{ "shape":"ExecutionNumber", @@ -336,6 +448,7 @@ "pattern":"[a-zA-Z0-9_-]+" }, "LastUpdatedAt":{"type":"long"}, + "LongParameterValue":{"type":"long"}, "QueuedAt":{"type":"long"}, "ResourceNotFoundException":{ "type":"structure", @@ -349,6 +462,15 @@ "error":{"httpStatusCode":404}, "exception":true }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"errorMessage"} + }, + "documentation":"The service quota has been exceeded for this request.
", + "error":{"httpStatusCode":402}, + "exception":true + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -362,6 +484,45 @@ "exception":true, "fault":true }, + "StartCommandExecutionRequest":{ + "type":"structure", + "required":[ + "targetArn", + "commandArn" + ], + "members":{ + "targetArn":{ + "shape":"TargetArn", + "documentation":"The Amazon Resource Number (ARN) of the device where the command execution is occurring.
" + }, + "commandArn":{ + "shape":"CommandArn", + "documentation":"The Amazon Resource Number (ARN) of the command. For example, arn:aws:iot:<region>:<accountid>:command/<commandName>
A list of parameters that are required by the StartCommandExecution
API when performing the command on a device.
Specifies the amount of time in second the device has to finish the command execution. A timer is started as soon as the command execution is created. If the command execution status is not set to another terminal state before the timer expires, it will automatically update to TIMED_OUT
.
The client token is used to implement idempotency. It ensures that the request completes no more than one time. If you retry a request with the same token and the same parameters, the request will complete successfully. However, if you retry the request using the same token but different parameters, an HTTP 409 conflict occurs. If you omit this value, Amazon Web Services SDKs will automatically generate a unique client request.
", + "idempotencyToken":true + } + } + }, + "StartCommandExecutionResponse":{ + "type":"structure", + "members":{ + "executionId":{ + "shape":"CommandExecutionId", + "documentation":"A unique identifier for the command execution.
" + } + } + }, "StartNextPendingJobExecutionRequest":{ "type":"structure", "required":["thingName"], @@ -374,11 +535,11 @@ }, "statusDetails":{ "shape":"DetailsMap", - "documentation":"A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged.
" + "documentation":"A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged.
The maximum length of the value in the name/value pair is 1,024 characters.
" }, "stepTimeoutInMinutes":{ "shape":"StepTimeoutInMinutes", - "documentation":"Specifies the amount of time this device has to finish execution of this job. If the job execution status is not set to a terminal state before this timer expires, or before the timer is reset (by calling UpdateJobExecution
, setting the status to IN_PROGRESS and specifying a new timeout value in field stepTimeoutInMinutes) the job execution status will be automatically set to TIMED_OUT. Note that setting this timeout has no effect on that job execution timeout which may have been specified when the job was created (CreateJob using field timeoutConfig).
Specifies the amount of time this device has to finish execution of this job. If the job execution status is not set to a terminal state before this timer expires, or before the timer is reset (by calling UpdateJobExecution
, setting the status to IN_PROGRESS, and specifying a new timeout value in field stepTimeoutInMinutes) the job execution status will be automatically set to TIMED_OUT. Note that setting the step timeout has no effect on the in progress timeout that may have been specified when the job was created (CreateJob using field timeoutConfig).
Valid values for this parameter range from 1 to 10080 (1 minute to 7 days).
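As a sketch, a device-side handler might reset this step timer while reporting progress; the endpoint, job ID, and thing name are placeholders.

import boto3

jobs_data = boto3.client(
    "iot-jobs-data",
    endpoint_url="https://example-ats.jobs.iot.us-east-1.amazonaws.com",  # hypothetical endpoint
)

jobs_data.update_job_execution(
    jobId="firmware-update-42",  # placeholder
    thingName="my-thing",        # placeholder
    status="IN_PROGRESS",
    stepTimeoutInMinutes=30,     # device now has 30 more minutes before TIMED_OUT
)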
" } } }, @@ -393,6 +554,14 @@ }, "StartedAt":{"type":"long"}, "StepTimeoutInMinutes":{"type":"long"}, + "StringParameterValue":{ + "type":"string", + "min":1 + }, + "TargetArn":{ + "type":"string", + "max":2048 + }, "TerminalStateException":{ "type":"structure", "members":{ @@ -424,6 +593,12 @@ "error":{"httpStatusCode":429}, "exception":true }, + "UnsignedLongParameterValue":{ + "type":"string", + "max":20, + "min":1, + "pattern":"^[0-9]*$" + }, "UpdateJobExecutionRequest":{ "type":"structure", "required":[ @@ -450,11 +625,11 @@ }, "statusDetails":{ "shape":"DetailsMap", - "documentation":"Optional. A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged.
" + "documentation":"Optional. A collection of name/value pairs that describe the status of the job execution. If not specified, the statusDetails are unchanged.
The maximum length of the value in the name/value pair is 1,024 characters.
" }, "stepTimeoutInMinutes":{ "shape":"StepTimeoutInMinutes", - "documentation":"Specifies the amount of time this device has to finish execution of this job. If the job execution status is not set to a terminal state before this timer expires, or before the timer is reset (by again calling UpdateJobExecution
, setting the status to IN_PROGRESS
and specifying a new timeout value in this field) the job execution status will be automatically set to TIMED_OUT
. Note that setting or resetting this timeout has no effect on that job execution timeout which may have been specified when the job was created (CreateJob
using field timeoutConfig
).
Specifies the amount of time this device has to finish execution of this job. If the job execution status is not set to a terminal state before this timer expires, or before the timer is reset (by again calling UpdateJobExecution
, setting the status to IN_PROGRESS
, and specifying a new timeout value in this field) the job execution status will be automatically set to TIMED_OUT
. Note that setting or resetting the step timeout has no effect on the in progress timeout that may have been specified when the job was created (CreateJob
using field timeoutConfig
).
Valid values for this parameter range from 1 to 10080 (1 minute to 7 days). A value of -1 is also valid and will cancel the current step timer (created by an earlier use of UpdateJobExecutionRequest
).
A validation error occurred when performing the API request.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "VersionNumber":{"type":"long"}, - "errorMessage":{"type":"string"} + "errorMessage":{"type":"string"}, + "resourceId":{"type":"string"} }, - "documentation":"AWS IoT Jobs is a service that allows you to define a set of jobs — remote operations that are sent to and executed on one or more devices connected to AWS IoT. For example, you can define a job that instructs a set of devices to download and install application or firmware updates, reboot, rotate certificates, or perform remote troubleshooting operations.
To create a job, you make a job document which is a description of the remote operations to be performed, and you specify a list of targets that should perform the operations. The targets can be individual things, thing groups or both.
AWS IoT Jobs sends a message to inform the targets that a job is available. The target starts the execution of the job by downloading the job document, performing the operations it specifies, and reporting its progress to AWS IoT. The Jobs service provides commands to track the progress of a job on a specific target and for all the targets of the job
" + "documentation":"IoT Jobs is a service that allows you to define a set of jobs — remote operations that are sent to and executed on one or more devices connected to Amazon Web Services IoT Core. For example, you can define a job that instructs a set of devices to download and install application or firmware updates, reboot, rotate certificates, or perform remote troubleshooting operations.
Find the endpoint address for actions in the IoT jobs data plane by running this CLI command:
aws iot describe-endpoint --endpoint-type iot:Jobs
The service name used by Amazon Web Services Signature Version 4 to sign requests is: iot-jobs-data.
To create a job, you make a job document which is a description of the remote operations to be performed, and you specify a list of targets that should perform the operations. The targets can be individual things, thing groups or both.
IoT Jobs sends a message to inform the targets that a job is available. The target starts the execution of the job by downloading the job document, performing the operations it specifies, and reporting its progress to Amazon Web Services IoT Core. The Jobs service provides commands to track the progress of a job on a specific target and for all the targets of the job
" } diff --git a/botocore/data/iot/2015-05-28/paginators-1.json b/botocore/data/iot/2015-05-28/paginators-1.json index 2f18ca4ca3..4b227cd5cd 100644 --- a/botocore/data/iot/2015-05-28/paginators-1.json +++ b/botocore/data/iot/2015-05-28/paginators-1.json @@ -373,6 +373,18 @@ "limit_key": "maxResults", "output_token": "nextToken", "result_key": "thingPrincipalObjects" + }, + "ListCommandExecutions": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "commandExecutions" + }, + "ListCommands": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "commands" } } } diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 86bdbe1e02..b352490225 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -396,6 +396,23 @@ ], "documentation":"Creates an Amazon Web Services IoT Core certificate provider. You can use Amazon Web Services IoT Core certificate provider to customize how to sign a certificate signing request (CSR) in IoT fleet provisioning. For more information, see Customizing certificate signing using Amazon Web Services IoT Core certificate provider from Amazon Web Services IoT Core Developer Guide.
Requires permission to access the CreateCertificateProvider action.
After you create a certificate provider, the behavior of CreateCertificateFromCsr
API for fleet provisioning will change and all API calls to CreateCertificateFromCsr
will invoke the certificate provider to create the certificates. It can take up to a few minutes for this behavior to change after a certificate provider is created.
Creates a command. A command contains reusable configurations that can be applied before they are sent to the devices.
" + }, "CreateCustomMetric":{ "name":"CreateCustomMetric", "http":{ @@ -996,6 +1013,38 @@ ], "documentation":"Deletes a certificate provider.
Requires permission to access the DeleteCertificateProvider action.
If you delete the certificate provider resource, the behavior of CreateCertificateFromCsr
will resume, and IoT will create certificates signed by IoT from a certificate signing request (CSR).
Delete a command resource.
" + }, + "DeleteCommandExecution":{ + "name":"DeleteCommandExecution", + "http":{ + "method":"DELETE", + "requestUri":"/command-executions/{executionId}" + }, + "input":{"shape":"DeleteCommandExecutionRequest"}, + "output":{"shape":"DeleteCommandExecutionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Delete a command execution.
Only command executions that enter a terminal state can be deleted from your account.
Returns the approximate count of unique values that match the query.
Requires permission to access the GetCardinality action.
" }, + "GetCommand":{ + "name":"GetCommand", + "http":{ + "method":"GET", + "requestUri":"/commands/{commandId}" + }, + "input":{"shape":"GetCommandRequest"}, + "output":{"shape":"GetCommandResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Gets information about the specified command.
" + }, + "GetCommandExecution":{ + "name":"GetCommandExecution", + "http":{ + "method":"GET", + "requestUri":"/command-executions/{executionId}" + }, + "input":{"shape":"GetCommandExecutionRequest"}, + "output":{"shape":"GetCommandExecutionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Gets information about the specific command execution on a single device.
" + }, "GetEffectivePolicies":{ "name":"GetEffectivePolicies", "http":{ @@ -2704,6 +2785,37 @@ ], "documentation":"List the device certificates signed by the specified CA certificate.
Requires permission to access the ListCertificatesByCA action.
" }, + "ListCommandExecutions":{ + "name":"ListCommandExecutions", + "http":{ + "method":"POST", + "requestUri":"/command-executions" + }, + "input":{"shape":"ListCommandExecutionsRequest"}, + "output":{"shape":"ListCommandExecutionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"List all command executions.
You must provide only the startedTimeFilter
or the completedTimeFilter
information. If you provide both time filters, the API will generate an error. You can use this information to find command executions that started within a specific timeframe.
List all commands in your account.
" + }, "ListCustomMetrics":{ "name":"ListCustomMetrics", "http":{ @@ -4084,6 +4196,23 @@ ], "documentation":"Updates a certificate provider.
Requires permission to access the UpdateCertificateProvider action.
" }, + "UpdateCommand":{ + "name":"UpdateCommand", + "http":{ + "method":"PATCH", + "requestUri":"/commands/{commandId}" + }, + "input":{"shape":"UpdateCommandRequest"}, + "output":{"shape":"UpdateCommandResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Update information about a command or mark a command for deprecation.
" + }, "UpdateCustomMetric":{ "name":"UpdateCustomMetric", "http":{ @@ -6074,8 +6203,18 @@ }, "documentation":"The properties of a billing group.
" }, + "BinaryCommandExecutionResult":{ + "type":"blob", + "min":1 + }, + "BinaryParameterValue":{ + "type":"blob", + "min":1 + }, "Boolean":{"type":"boolean"}, + "BooleanCommandExecutionResult":{"type":"boolean"}, "BooleanKey":{"type":"boolean"}, + "BooleanParameterValue":{"type":"boolean"}, "BooleanWrapperObject":{"type":"boolean"}, "Bucket":{ "type":"structure", @@ -6767,6 +6906,251 @@ "documentation":"Describes the signature for a file.
" }, "CognitoIdentityPoolId":{"type":"string"}, + "CommandArn":{"type":"string"}, + "CommandDescription":{ + "type":"string", + "max":2028, + "pattern":"[^\\p{C}]*" + }, + "CommandExecutionId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "CommandExecutionParameterMap":{ + "type":"map", + "key":{"shape":"CommandParameterName"}, + "value":{"shape":"CommandParameterValue"}, + "min":1 + }, + "CommandExecutionResult":{ + "type":"structure", + "members":{ + "S":{ + "shape":"StringCommandExecutionResult", + "documentation":"An attribute of type String. For example:
\"S\": \"Hello\"
An attribute of type Boolean. For example:
\"BOOL\": true
An attribute of type Binary.
" + } + }, + "documentation":"The result value of the command execution. The device can use the result field to share additional details about the execution such as a return value of a remote function call.
This field is not applicable if you use the AWS-IoT-FleetWise
namespace.
The Amazon Resource Name (ARN) of the command execution.
" + }, + "executionId":{ + "shape":"CommandExecutionId", + "documentation":"The unique identifier of the command execution.
" + }, + "targetArn":{ + "shape":"TargetArn", + "documentation":"The Amazon Resource Name (ARN) of the target device for which the command is being executed.
" + }, + "status":{ + "shape":"CommandExecutionStatus", + "documentation":"The status of the command executions.
" + }, + "createdAt":{ + "shape":"DateType", + "documentation":"The date and time at which the command execution was created for the target device.
" + }, + "startedAt":{ + "shape":"DateType", + "documentation":"The date and time at which the command started executing on the target device.
" + }, + "completedAt":{ + "shape":"DateType", + "documentation":"The date and time at which the command completed executing on the target device.
" + } + }, + "documentation":"Summary information about a particular command execution.
" + }, + "CommandExecutionSummaryList":{ + "type":"list", + "member":{"shape":"CommandExecutionSummary"} + }, + "CommandExecutionTimeoutInSeconds":{ + "type":"long", + "min":1 + }, + "CommandId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "CommandMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "CommandNamespace":{ + "type":"string", + "enum":[ + "AWS-IoT", + "AWS-IoT-FleetWise" + ] + }, + "CommandParameter":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"CommandParameterName", + "documentation":"The name of a specific parameter used in a command and command execution.
" + }, + "value":{ + "shape":"CommandParameterValue", + "documentation":"The value used to describe the command. When you assign a value to a parameter, it will override any default value that you had already specified.
" + }, + "defaultValue":{ + "shape":"CommandParameterValue", + "documentation":"The default value used to describe the command. This is the value assumed by the parameter if no other value is assigned to it.
" + }, + "description":{ + "shape":"CommandParameterDescription", + "documentation":"The description of the command parameter.
" + } + }, + "documentation":"A map of key-value pairs that describe the command.
" + }, + "CommandParameterDescription":{ + "type":"string", + "max":2028, + "pattern":"[^\\p{C}]*" + }, + "CommandParameterList":{ + "type":"list", + "member":{"shape":"CommandParameter"}, + "min":1 + }, + "CommandParameterName":{ + "type":"string", + "max":192, + "min":1, + "pattern":"^[.$a-zA-Z0-9_-]+$" + }, + "CommandParameterValue":{ + "type":"structure", + "members":{ + "S":{ + "shape":"StringParameterValue", + "documentation":"An attribute of type String. For example:
\"S\": \"Hello\"
An attribute of type Boolean. For example:
\"BOOL\": true
An attribute of type Integer (Thirty-Two Bits).
" + }, + "L":{ + "shape":"LongParameterValue", + "documentation":"An attribute of type Long.
" + }, + "D":{ + "shape":"DoubleParameterValue", + "documentation":"An attribute of type Double (Sixty-Four Bits).
" + }, + "BIN":{ + "shape":"BinaryParameterValue", + "documentation":"An attribute of type Binary. For example:
\"B\": \"dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk\"
An attribute of type unsigned long.
" + } + }, + "documentation":"The range of possible values that's used to describe a specific command parameter.
The commandParameterValue
can only have one of the below fields listed.
The static payload file for the command.
" + }, + "contentType":{ + "shape":"MimeType", + "documentation":"The content type that specifies the format type of the payload file. This field must use a type/subtype format, such as application/json
. For information about various content types, see Common MIME types.
The command payload object that contains the instructions for the device to process.
" + }, + "CommandPayloadBlob":{"type":"blob"}, + "CommandSummary":{ + "type":"structure", + "members":{ + "commandArn":{ + "shape":"CommandArn", + "documentation":"The Amazon Resource Name (ARN) of the command.
" + }, + "commandId":{ + "shape":"CommandId", + "documentation":"The unique identifier of the command.
" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"The display name of the command.
" + }, + "deprecated":{ + "shape":"DeprecationFlag", + "documentation":"Indicates whether the command has been deprecated.
" + }, + "createdAt":{ + "shape":"DateType", + "documentation":"The timestamp, when the command was created.
" + }, + "lastUpdatedAt":{ + "shape":"DateType", + "documentation":"The timestamp, when the command was last updated.
" + }, + "pendingDeletion":{ + "shape":"BooleanWrapperObject", + "documentation":"Indicates whether the command is pending deletion.
" + } + }, + "documentation":"Summary information about a particular command resource.
" + }, + "CommandSummaryList":{ + "type":"list", + "member":{"shape":"CommandSummary"} + }, "Comment":{ "type":"string", "max":2028, @@ -6837,7 +7221,7 @@ "documentation":"A resource with the same name already exists.
" } }, - "documentation":"A resource with the same name already exists.
", + "documentation":"The request conflicts with the current state of the resource.
", "error":{"httpStatusCode":409}, "exception":true }, @@ -7088,6 +7472,59 @@ } } }, + "CreateCommandRequest":{ + "type":"structure", + "required":["commandId"], + "members":{ + "commandId":{ + "shape":"CommandId", + "documentation":"A unique identifier for the command. We recommend using UUID. Alpha-numeric characters, hyphens, and underscores are valid for use here.
", + "location":"uri", + "locationName":"commandId" + }, + "namespace":{ + "shape":"CommandNamespace", + "documentation":"The namespace of the command. The MQTT reserved topics and validations will be used for command executions according to the namespace setting.
" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"The user-friendly name in the console for the command. This name doesn't have to be unique. You can update the user-friendly name after you define it.
" + }, + "description":{ + "shape":"CommandDescription", + "documentation":"A short text decription of the command.
" + }, + "payload":{ + "shape":"CommandPayload", + "documentation":"The payload object for the command. You must specify this information when using the AWS-IoT
namespace.
You can upload a static payload file from your local storage that contains the instructions for the device to process. The payload file can use any format. To make sure that the device correctly interprets the payload, we recommend that you specify the payload content type.
" + }, + "mandatoryParameters":{ + "shape":"CommandParameterList", + "documentation":"A list of parameters that are required by the StartCommandExecution
API. These parameters need to be specified only when using the AWS-IoT-FleetWise
namespace. You can either specify them here or when running the command using the StartCommandExecution
API.
" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"The IAM role that allows access to create the command.
" + }, + "tags":{ + "shape":"TagList", + "documentation":"Name-value pairs that are used as metadata to manage a command.
" + } + } + }, + "CreateCommandResponse":{ + "type":"structure", + "members":{ + "commandId":{ + "shape":"CommandId", + "documentation":"The unique identifier for the command.
" + }, + "commandArn":{ + "shape":"CommandArn", + "documentation":"The Amazon Resource Number (ARN) of the command. For example, arn:aws:iot:<region>:<accountid>:command/<commandId>
A job identifier which must be unique for your Amazon Web Services account. We recommend using a UUID. Alpha-numeric characters, \"-\" and \"_\" are valid for use here.
", + "documentation":"A job identifier which must be unique for your account. We recommend using a UUID. Alpha-numeric characters, \"-\" and \"_\" are valid for use here.
", "location":"uri", "locationName":"jobId" }, @@ -8630,6 +9067,54 @@ }, "documentation":"The input for the DeleteCertificate operation.
" }, + "DeleteCommandExecutionRequest":{ + "type":"structure", + "required":[ + "executionId", + "targetArn" + ], + "members":{ + "executionId":{ + "shape":"CommandExecutionId", + "documentation":"The unique identifier of the command execution that you want to delete from your account.
", + "location":"uri", + "locationName":"executionId" + }, + "targetArn":{ + "shape":"TargetArn", + "documentation":"The Amazon Resource Number (ARN) of the target device for which you want to delete command executions.
", + "location":"querystring", + "locationName":"targetArn" + } + } + }, + "DeleteCommandExecutionResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteCommandRequest":{ + "type":"structure", + "required":["commandId"], + "members":{ + "commandId":{ + "shape":"CommandId", + "documentation":"The unique identifier of the command to be deleted.
", + "location":"uri", + "locationName":"commandId" + } + } + }, + "DeleteCommandResponse":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"StatusCode", + "documentation":"The status code for the command deletion request. The status code is in the 200 range for a successful request.
If the command hasn't been deprecated, or has been deprecated for a duration that is shorter than the maximum time out duration of 12 hours, when calling the DeleteCommand
request, the deletion will be scheduled and a 202 status code will be returned. While the command is being deleted, it will be in a pendingDeletion
state. Once the time out duration has been reached, the command will be permanently removed from your account.
If the command has been deprecated for a duration that is longer than the maximum time out duration of 12 hours, when calling the DeleteCommand
request, the command will be deleted immediately and a 204 status code will be returned.
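A small sketch of how a caller might branch on the documented status codes; the command identifier is a hypothetical placeholder:

```python
import boto3

iot = boto3.client("iot")
resp = iot.delete_command(commandId="my-command-id")  # hypothetical identifier

if resp.get("statusCode") == 202:
    # Deletion was scheduled; the command sits in a pendingDeletion state
    # until the 12-hour maximum timeout has elapsed.
    print("deletion scheduled")
elif resp.get("statusCode") == 204:
    # Deprecated for longer than 12 hours: removed immediately.
    print("deleted immediately")
```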
The output for the DeprecateThingType operation.
" }, "DeprecationDate":{"type":"timestamp"}, + "DeprecationFlag":{"type":"boolean"}, "DescribeAccountAuditConfigurationRequest":{ "type":"structure", "members":{ @@ -9926,7 +10412,7 @@ }, "beforeSubstitution":{ "shape":"BeforeSubstitutionFlag", - "documentation":"A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values.
", + "documentation":"Provides a view of the job document before and after the substitution parameters have been resolved with their exact values.
", "location":"querystring", "locationName":"beforeSubstitution" } @@ -10943,6 +11429,11 @@ } }, "DisconnectReason":{"type":"string"}, + "DisplayName":{ + "type":"string", + "max":64, + "pattern":"[^\\p{C}]*" + }, "DocumentParameter":{ "type":"structure", "members":{ @@ -11023,6 +11514,7 @@ "CUSTOMER_MANAGED" ] }, + "DoubleParameterValue":{"type":"double"}, "DurationInMinutes":{ "type":"integer", "max":1430, @@ -11620,6 +12112,155 @@ } } }, + "GetCommandExecutionRequest":{ + "type":"structure", + "required":[ + "executionId", + "targetArn" + ], + "members":{ + "executionId":{ + "shape":"CommandExecutionId", + "documentation":"The unique identifier for the command execution. This information is returned as a response of the StartCommandExecution
API request.
The Amazon Resource Name (ARN) of the device on which the command execution is being performed.
", + "location":"querystring", + "locationName":"targetArn" + }, + "includeResult":{ + "shape":"BooleanWrapperObject", + "documentation":"Can be used to specify whether to include the result of the command execution in the GetCommandExecution
API response. Your device can use this field to provide additional information about the command execution. You only need to specify this field when using the AWS-IoT
namespace.
The unique identifier of the command execution.
" + }, + "commandArn":{ + "shape":"CommandArn", + "documentation":"The Amazon Resource Number (ARN) of the command. For example, arn:aws:iot:<region>:<accountid>:command/<commandId>
The Amazon Resource Number (ARN) of the device on which the command execution is being performed.
" + }, + "status":{ + "shape":"CommandExecutionStatus", + "documentation":"The status of the command execution. After your devices receive the command and start performing the operations specified in the command, it can use the UpdateCommandExecution
MQTT API to update the status information.
Your devices can use this parameter to provide additional context about the status of a command execution using a reason code and description.
" + }, + "result":{ + "shape":"CommandExecutionResultMap", + "documentation":"The result value for the current state of the command execution. The status provides information about the progress of the command execution. The device can use the result field to share additional details about the execution such as a return value of a remote function call.
If you use the AWS-IoT-FleetWise
namespace, then this field is not applicable in the API response.
The list of parameters that the StartCommandExecution
API used when performing the command on the device.
Specifies the amount of time in seconds that the device can take to finish a command execution. A timer starts when the command execution is created. If the command execution status is not set to another terminal state before the timer expires, it will automatically update to TIMED_OUT
.
The timestamp when the command execution was created.
" + }, + "lastUpdatedAt":{ + "shape":"DateType", + "documentation":"The timestamp when the command execution was last updated.
" + }, + "startedAt":{ + "shape":"DateType", + "documentation":"The timestamp when the command execution was started.
" + }, + "completedAt":{ + "shape":"DateType", + "documentation":"The timestamp when the command execution was completed.
" + }, + "timeToLive":{ + "shape":"DateType", + "documentation":"The time to live (TTL) parameter for the GetCommandExecution
API.
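A minimal sketch of retrieving one execution with GetCommandExecution; both the execution ID and the thing ARN are hypothetical placeholders:

```python
import boto3

iot = boto3.client("iot")
execution = iot.get_command_execution(
    executionId="07e4b780-7eca-4ffd-b772-b76358da5542",  # hypothetical; returned by StartCommandExecution
    targetArn="arn:aws:iot:us-east-1:123456789012:thing/myThing",
    includeResult=True,  # result is only applicable to the AWS-IoT namespace
)
print(execution["status"])            # e.g. TIMED_OUT if the timer expired
print(execution.get("statusReason"))  # optional reasonCode / reasonDescription
print(execution.get("result"))        # device-reported values, if any
```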
The unique identifier of the command for which you want to retrieve information.
", + "location":"uri", + "locationName":"commandId" + } + } + }, + "GetCommandResponse":{ + "type":"structure", + "members":{ + "commandId":{ + "shape":"CommandId", + "documentation":"The unique identifier of the command.
" + }, + "commandArn":{ + "shape":"CommandArn", + "documentation":"The Amazon Resource Number (ARN) of the command. For example, arn:aws:iot:<region>:<accountid>:command/<commandId>
The namespace of the command.
" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"The user-friendly name in the console for the command.
" + }, + "description":{ + "shape":"CommandDescription", + "documentation":"A short text description of the command.
" + }, + "mandatoryParameters":{ + "shape":"CommandParameterList", + "documentation":"A list of parameters for the command created.
" + }, + "payload":{ + "shape":"CommandPayload", + "documentation":"The payload object that you provided for the command.
" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"The IAM role that allows access to retrieve information about the command.
" + }, + "createdAt":{ + "shape":"DateType", + "documentation":"The timestamp, when the command was created.
" + }, + "lastUpdatedAt":{ + "shape":"DateType", + "documentation":"The timestamp, when the command was last updated.
" + }, + "deprecated":{ + "shape":"DeprecationFlag", + "documentation":"Indicates whether the command has been deprecated.
" + }, + "pendingDeletion":{ + "shape":"BooleanWrapperObject", + "documentation":"Indicates whether the command is being deleted.
" + } + } + }, "GetEffectivePoliciesRequest":{ "type":"structure", "members":{ @@ -11678,7 +12319,7 @@ }, "beforeSubstitution":{ "shape":"BeforeSubstitutionFlag", - "documentation":"A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values.
", + "documentation":"Provides a view of the job document before and after the substitution parameters have been resolved with their exact values.
", "location":"querystring", "locationName":"beforeSubstitution" } @@ -12343,6 +12984,7 @@ "max":128, "min":1 }, + "IntegerParameterValue":{"type":"integer"}, "InternalException":{ "type":"structure", "members":{ @@ -13730,6 +14372,112 @@ }, "documentation":"The output of the ListCertificates operation.
" }, + "ListCommandExecutionsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"CommandMaxResults", + "documentation":"The maximum number of results to return in this operation.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"To retrieve the next set of results, the nextToken
value from a previous response; otherwise null
to receive the first set of results.
The namespace of the command.
" + }, + "status":{ + "shape":"CommandExecutionStatus", + "documentation":"List all command executions for the device that have a particular status. For example, you can filter the list to display only command executions that have failed or timed out.
" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"Specify whether to list the command executions that were created in the ascending or descending order. By default, the API returns all commands in the descending order based on the start time or completion time of the executions, that are determined by the startTimeFilter
and completeTimeFilter
parameters.
List all command executions that started any time before or after the date and time that you specify. The date and time uses the format yyyy-MM-dd'T'HH:mm
.
List all command executions that completed any time before or after the date and time that you specify. The date and time uses the format yyyy-MM-dd'T'HH:mm
.
The Amazon Resource Name (ARN) of the target device. You can use this information to list all command executions for a particular device.
" + }, + "commandArn":{ + "shape":"CommandArn", + "documentation":"The Amazon Resource Name (ARN) of the command. You can use this information to list all command executions for a particular command.
" + } + } + }, + "ListCommandExecutionsResponse":{ + "type":"structure", + "members":{ + "commandExecutions":{ + "shape":"CommandExecutionSummaryList", + "documentation":"The list of command executions.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The token to use to get the next set of results, or null
if there are no additional results.
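A sketch of filtering executions for a single device, assuming a hypothetical thing ARN; the fields inside each returned summary are whatever the CommandExecutionSummary shape carries:

```python
import boto3

iot = boto3.client("iot")
resp = iot.list_command_executions(
    targetArn="arn:aws:iot:us-east-1:123456789012:thing/myThing",  # hypothetical
    status="TIMED_OUT",      # only executions that timed out
    sortOrder="DESCENDING",  # newest first, per the sortOrder documentation
    maxResults=25,
)
for summary in resp["commandExecutions"]:
    print(summary)
```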
The maximum number of results to return in this operation. By default, the API returns up to a maximum of 25 results. You can override this default value to return up to a maximum of 100 results for this operation.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"To retrieve the next set of results, the nextToken
value from a previous response; otherwise null
to receive the first set of results.
The namespace of the command. By default, the API returns all commands that have been created for both AWS-IoT
and AWS-IoT-FleetWise
namespaces. You can override this default value if you want to return all commands that have been created only for a specific namespace.
A filter that can be used to display the list of commands that have a specific command parameter name.
", + "location":"querystring", + "locationName":"commandParameterName" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"Specify whether to list the commands that you have created in the ascending or descending order. By default, the API returns all commands in the descending order based on the time that they were created.
", + "location":"querystring", + "locationName":"sortOrder" + } + } + }, + "ListCommandsResponse":{ + "type":"structure", + "members":{ + "commands":{ + "shape":"CommandSummaryList", + "documentation":"The list of commands.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The token to use to get the next set of results, or null
if there are no additional results.
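ListCommands follows the usual nextToken contract described above, so a manual paging loop is straightforward; a minimal sketch:

```python
import boto3

iot = boto3.client("iot")
kwargs = {"namespace": "AWS-IoT", "maxResults": 100, "sortOrder": "ASCENDING"}
while True:
    page = iot.list_commands(**kwargs)
    for summary in page.get("commands", []):
        print(summary)
    token = page.get("nextToken")
    if not token:  # a null token means there are no additional results
        break
    kwargs["nextToken"] = token
```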
Describes the logging options payload.
" }, + "LongParameterValue":{"type":"long"}, "MachineLearningDetectionConfig":{ "type":"structure", "required":["confidenceLevel"], @@ -16129,6 +16878,10 @@ }, "documentation":"Set configurations for metrics export.
" }, + "MimeType":{ + "type":"string", + "min":1 + }, "Minimum":{"type":"double"}, "MinimumNumberOfExecutedThings":{ "type":"integer", @@ -18204,11 +18957,11 @@ }, "ocspLambdaArn":{ "shape":"OCSPLambdaArn", - "documentation":"The Amazon Resource Name (ARN) for a Lambda function that acts as a Request for Comments (RFC) 6960-compliant Online Certificate Status Protocol (OCSP) responder, supporting basic OCSP responses. The Lambda function accepts a JSON string that's Base64-encoded. Therefore, you must convert your OCSP response, which is typically in the Distinguished Encoding Rules (DER) format, into a JSON string that's Base64-encoded. The Lambda function's response is also a Base64-encoded JSON string and the response payload must not exceed 8 kilobytes (KiB) in size. The Lambda function must be in the same Amazon Web Services region and account as the domain configuration.
" + "documentation":"The Amazon Resource Name (ARN) for a Lambda function that acts as a Request for Comments (RFC) 6960-compliant Online Certificate Status Protocol (OCSP) responder, supporting basic OCSP responses. The Lambda function accepts a base64-encoding of the OCSP request in the Distinguished Encoding Rules (DER) format. The Lambda function's response is also a base64-encoded OCSP response in the DER format. The response size must not exceed 4 kilobytes (KiB). The Lambda function must be in the same Amazon Web Services account and region as the domain configuration. For more information, see Configuring server certificate OCSP for private endpoints in Amazon Web Services IoT Core from the Amazon Web Services IoT Core developer guide.
" }, "ocspAuthorizedResponderArn":{ "shape":"AcmCertificateArn", - "documentation":"The Amazon Resource Name (ARN) for an X.509 certificate stored in Amazon Web Services Certificate Manager (ACM). If provided, Amazon Web Services IoT Core will use this certificate to validate the signature of the received OCSP response. The OCSP responder must sign responses using either this authorized responder certificate or the issuing certificate, depending on whether the ARN is provided or not. The certificate must be in the same Amazon Web Services region and account as the domain configuration.
" + "documentation":"The Amazon Resource Name (ARN) for an X.509 certificate stored in Amazon Web Services Certificate Manager (ACM). If provided, Amazon Web Services IoT Core will use this certificate to validate the signature of the received OCSP response. The OCSP responder must sign responses using either this authorized responder certificate or the issuing certificate, depending on whether the ARN is provided or not. The certificate must be in the same Amazon Web Services account and region as the domain configuration.
" } }, "documentation":"The server certificate configuration.
" @@ -18255,7 +19008,7 @@ "members":{ "message":{"shape":"errorMessage"} }, - "documentation":"A limit has been exceeded.
", + "documentation":"Service quota has been exceeded.
", "error":{"httpStatusCode":402}, "exception":true }, @@ -18458,6 +19211,13 @@ "type":"string", "max":350 }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, "SqlParseException":{ "type":"structure", "members":{ @@ -18723,6 +19483,32 @@ "Cancelling" ] }, + "StatusCode":{"type":"integer"}, + "StatusReason":{ + "type":"structure", + "required":["reasonCode"], + "members":{ + "reasonCode":{ + "shape":"StatusReasonCode", + "documentation":"A code that provides additional context for the command execution status.
" + }, + "reasonDescription":{ + "shape":"StatusReasonDescription", + "documentation":"A literal string for devices to optionally provide additional information about the reason code for a command execution status.
" + } + }, + "documentation":"Provide additional context about the status of a command execution using a reason code and description.
" + }, + "StatusReasonCode":{ + "type":"string", + "max":64, + "pattern":"[A-Z0-9_-]+" + }, + "StatusReasonDescription":{ + "type":"string", + "max":1024, + "pattern":"[^\\p{C}]*" + }, "StdDeviation":{"type":"double"}, "StepFunctionsAction":{ "type":"structure", @@ -18880,6 +19666,10 @@ "member":{"shape":"StreamSummary"} }, "String":{"type":"string"}, + "StringCommandExecutionResult":{ + "type":"string", + "min":1 + }, "StringDateTime":{ "type":"string", "max":64, @@ -18894,6 +19684,10 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "StringParameterValue":{ + "type":"string", + "min":1 + }, "SubnetId":{"type":"string"}, "SubnetIdList":{ "type":"list", @@ -19606,6 +20400,20 @@ "error":{"httpStatusCode":400}, "exception":true }, + "TimeFilter":{ + "type":"structure", + "members":{ + "after":{ + "shape":"StringDateTime", + "documentation":"Filter to display command executions that started or completed only after a particular date and time.
" + }, + "before":{ + "shape":"StringDateTime", + "documentation":"Filter to display command executions that started or completed only before a particular date and time.
" + } + }, + "documentation":"A filter that can be used to list command executions for a device that started or completed before or after a particular date and time.
" + }, "TimedOutThings":{"type":"integer"}, "TimeoutConfig":{ "type":"structure", @@ -20061,6 +20869,12 @@ "type":"long", "min":0 }, + "UnsignedLongParameterValue":{ + "type":"string", + "max":20, + "min":1, + "pattern":"^[0-9]*$" + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -20309,6 +21123,55 @@ }, "documentation":"The input for the UpdateCertificate operation.
" }, + "UpdateCommandRequest":{ + "type":"structure", + "required":["commandId"], + "members":{ + "commandId":{ + "shape":"CommandId", + "documentation":"The unique identifier of the command to be updated.
", + "location":"uri", + "locationName":"commandId" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"The new user-friendly name to use in the console for the command.
" + }, + "description":{ + "shape":"CommandDescription", + "documentation":"A short text description of the command.
" + }, + "deprecated":{ + "shape":"DeprecationFlag", + "documentation":"A boolean that you can use to specify whether to deprecate a command.
" + } + } + }, + "UpdateCommandResponse":{ + "type":"structure", + "members":{ + "commandId":{ + "shape":"CommandId", + "documentation":"The unique identifier of the command.
" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"The updated user-friendly display name in the console for the command.
" + }, + "description":{ + "shape":"CommandDescription", + "documentation":"The updated text description of the command.
" + }, + "deprecated":{ + "shape":"DeprecationFlag", + "documentation":"The boolean that indicates whether the command was deprecated.
" + }, + "lastUpdatedAt":{ + "shape":"DateType", + "documentation":"The date and time (epoch timestamp in seconds) when the command was last updated.
" + } + } + }, "UpdateCustomMetricRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/iotfleetwise/2021-06-17/paginators-1.json b/botocore/data/iotfleetwise/2021-06-17/paginators-1.json index ff157ff175..cc8dcba683 100644 --- a/botocore/data/iotfleetwise/2021-06-17/paginators-1.json +++ b/botocore/data/iotfleetwise/2021-06-17/paginators-1.json @@ -77,6 +77,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "vehicles" + }, + "ListStateTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "summaries" } } } diff --git a/botocore/data/iotfleetwise/2021-06-17/service-2.json b/botocore/data/iotfleetwise/2021-06-17/service-2.json index 134d566b0d..e24b9f7182 100644 --- a/botocore/data/iotfleetwise/2021-06-17/service-2.json +++ b/botocore/data/iotfleetwise/2021-06-17/service-2.json @@ -2,9 +2,11 @@ "version":"2.0", "metadata":{ "apiVersion":"2021-06-17", + "auth":["aws.auth#sigv4"], "endpointPrefix":"iotfleetwise", "jsonVersion":"1.0", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS IoT FleetWise", "serviceId":"IoTFleetWise", "signatureVersion":"v4", @@ -82,7 +84,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Creates an orchestration of data collection rules. The Amazon Web Services IoT FleetWise Edge Agent software running in vehicles uses campaigns to decide how to collect and transfer data to the cloud. You create campaigns in the cloud. After you or your team approve campaigns, Amazon Web Services IoT FleetWise automatically deploys them to vehicles.
For more information, see Collect and transfer data with campaigns in the Amazon Web Services IoT FleetWise Developer Guide.
", + "documentation":"Creates an orchestration of data collection rules. The Amazon Web Services IoT FleetWise Edge Agent software running in vehicles uses campaigns to decide how to collect and transfer data to the cloud. You create campaigns in the cloud. After you or your team approve campaigns, Amazon Web Services IoT FleetWise automatically deploys them to vehicles.
For more information, see Collect and transfer data with campaigns in the Amazon Web Services IoT FleetWise Developer Guide.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Creates a collection of standardized signals that can be reused to create vehicle models.
", "idempotent":true }, + "CreateStateTemplate":{ + "name":"CreateStateTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStateTemplateRequest"}, + "output":{"shape":"CreateStateTemplateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidSignalsException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Creates a state template. State templates contain state properties, which are signals that belong to a signal catalog that is synchronized between the Amazon Web Services IoT FleetWise Edge and the Amazon Web Services Cloud.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Deletes a decoder manifest. You can't delete a decoder manifest if it has vehicles associated with it.
If the decoder manifest is successfully deleted, Amazon Web Services IoT FleetWise sends back an HTTP 200 response with an empty body.
Deletes a decoder manifest. You can't delete a decoder manifest if it has vehicles associated with it.
", "idempotent":true }, "DeleteFleet":{ @@ -238,7 +261,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Deletes a fleet. Before you delete a fleet, all vehicles must be dissociated from the fleet. For more information, see Delete a fleet (AWS CLI) in the Amazon Web Services IoT FleetWise Developer Guide.
If the fleet is successfully deleted, Amazon Web Services IoT FleetWise sends back an HTTP 200 response with an empty body.
Deletes a fleet. Before you delete a fleet, all vehicles must be dissociated from the fleet. For more information, see Delete a fleet (AWS CLI) in the Amazon Web Services IoT FleetWise Developer Guide.
", "idempotent":true }, "DeleteModelManifest":{ @@ -256,7 +279,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Deletes a vehicle model (model manifest).
If the vehicle model is successfully deleted, Amazon Web Services IoT FleetWise sends back an HTTP 200 response with an empty body.
Deletes a vehicle model (model manifest).
", "idempotent":true }, "DeleteSignalCatalog":{ @@ -274,7 +297,24 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Deletes a signal catalog.
If the signal catalog is successfully deleted, Amazon Web Services IoT FleetWise sends back an HTTP 200 response with an empty body.
Deletes a signal catalog.
", + "idempotent":true + }, + "DeleteStateTemplate":{ + "name":"DeleteStateTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteStateTemplateRequest"}, + "output":{"shape":"DeleteStateTemplateResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Deletes a state template.
", "idempotent":true }, "DeleteVehicle":{ @@ -291,7 +331,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Deletes a vehicle and removes it from any campaigns.
If the vehicle is successfully deleted, Amazon Web Services IoT FleetWise sends back an HTTP 200 response with an empty body.
Deletes a vehicle and removes it from any campaigns.
", "idempotent":true }, "DisassociateVehicleFleet":{ @@ -309,7 +349,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Removes, or disassociates, a vehicle from a fleet. Disassociating a vehicle from a fleet doesn't delete the vehicle.
If the vehicle is successfully dissociated from a fleet, Amazon Web Services IoT FleetWise sends back an HTTP 200 response with an empty body.
Removes, or disassociates, a vehicle from a fleet. Disassociating a vehicle from a fleet doesn't delete the vehicle.
" }, "GetCampaign":{ "name":"GetCampaign", @@ -326,7 +366,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Retrieves information about a campaign.
" + "documentation":"Retrieves information about a campaign.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Retrieves information about a signal catalog.
" }, + "GetStateTemplate":{ + "name":"GetStateTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetStateTemplateRequest"}, + "output":{"shape":"GetStateTemplateResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Retrieves information about a state template.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Retrieves information about the status of a vehicle with any associated campaigns.
" + "documentation":"Retrieves information about the status of campaigns, decoder manifests, or state templates associated with a vehicle.
" }, "ImportDecoderManifest":{ "name":"ImportDecoderManifest", @@ -497,7 +554,7 @@ {"shape":"InvalidSignalsException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Creates a decoder manifest using your existing CAN DBC file from your local device.
" + "documentation":"Creates a decoder manifest using your existing CAN DBC file from your local device.
The CAN signal name must be unique and not repeated across CAN message definitions in a .dbc file.
" }, "ImportSignalCatalog":{ "name":"ImportSignalCatalog", @@ -688,6 +745,22 @@ ], "documentation":"Lists all the created signal catalogs in an Amazon Web Services account.
You can use this API operation to list information about each signal (node) specified in a signal catalog.
This API operation uses pagination. Specify the nextToken
parameter in the request to return more results.
Lists information about created state templates.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
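The paginators-1.json hunk above registers ListStateTemplates with result key summaries, so botocore clients can page through state templates automatically; a minimal sketch:

```python
import boto3

fleetwise = boto3.client("iotfleetwise")
paginator = fleetwise.get_paginator("list_state_templates")
for page in paginator.paginate():
    for summary in page["summaries"]:  # result_key from the paginator entry
        print(summary)
```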
Updates the description of an existing fleet.
If the fleet is successfully updated, Amazon Web Services IoT FleetWise sends back an HTTP 200 response with an empty HTTP body.
Updates the description of an existing fleet.
" }, "UpdateModelManifest":{ "name":"UpdateModelManifest", @@ -928,6 +1001,26 @@ "documentation":"Updates a signal catalog.
", "idempotent":true }, + "UpdateStateTemplate":{ + "name":"UpdateStateTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateStateTemplateRequest"}, + "output":{"shape":"UpdateStateTemplateResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidSignalsException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Updates a state template.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Information about a collection scheme that uses a simple logical expression to recognize what data to collect.
" }, + "ConditionBasedSignalFetchConfig":{ + "type":"structure", + "required":[ + "conditionExpression", + "triggerMode" + ], + "members":{ + "conditionExpression":{ + "shape":"fetchConfigEventExpression", + "documentation":"The condition that must be satisfied to trigger a signal fetch.
" + }, + "triggerMode":{ + "shape":"TriggerMode", + "documentation":"Indicates the mode in which the signal fetch is triggered.
" + } + }, + "documentation":"Specifies the condition under which a signal fetch occurs.
" + }, "ConflictException":{ "type":"structure", "required":[ @@ -1430,37 +1541,37 @@ }, "startTime":{ "shape":"timestamp", - "documentation":"(Optional) The time, in milliseconds, to deliver a campaign after it was approved. If it's not specified, 0
is used.
Default: 0
The time, in milliseconds, to deliver a campaign after it was approved. If it's not specified, 0
is used.
Default: 0
(Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data isn't collected after the campaign expires.
Default: 253402214400 (December 31, 9999, 00:00:00 UTC)
" + "documentation":"The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data isn't collected after the campaign expires.
Default: 253402214400 (December 31, 9999, 00:00:00 UTC)
" }, "postTriggerCollectionDuration":{ "shape":"uint32", - "documentation":" (Optional) How long (in milliseconds) to collect raw data after a triggering event initiates the collection. If it's not specified, 0
is used.
Default: 0
How long (in milliseconds) to collect raw data after a triggering event initiates the collection. If it's not specified, 0
is used.
Default: 0
(Optional) Option for a vehicle to send diagnostic trouble codes to Amazon Web Services IoT FleetWise. If you want to send diagnostic trouble codes, use SEND_ACTIVE_DTCS
. If it's not specified, OFF
is used.
Default: OFF
Option for a vehicle to send diagnostic trouble codes to Amazon Web Services IoT FleetWise. If you want to send diagnostic trouble codes, use SEND_ACTIVE_DTCS
. If it's not specified, OFF
is used.
Default: OFF
(Optional) Whether to store collected data after a vehicle lost a connection with the cloud. After a connection is re-established, the data is automatically forwarded to Amazon Web Services IoT FleetWise. If you want to store collected data when a vehicle loses connection with the cloud, use TO_DISK
. If it's not specified, OFF
is used.
Default: OFF
Determines whether to store collected data after a vehicle lost a connection with the cloud. After a connection is re-established, the data is automatically forwarded to Amazon Web Services IoT FleetWise. If you want to store collected data when a vehicle loses connection with the cloud, use TO_DISK
. If it's not specified, OFF
is used.
Default: OFF
(Optional) Whether to compress signals before transmitting data to Amazon Web Services IoT FleetWise. If you don't want to compress the signals, use OFF
. If it's not specified, SNAPPY
is used.
Default: SNAPPY
Determines whether to compress signals before transmitting data to Amazon Web Services IoT FleetWise. If you don't want to compress the signals, use OFF
. If it's not specified, SNAPPY
is used.
Default: SNAPPY
(Optional) A number indicating the priority of one campaign over another campaign for a certain vehicle or fleet. A campaign with the lowest value is deployed to vehicles before any other campaigns. If it's not specified, 0
is used.
Default: 0
A number indicating the priority of one campaign over another campaign for a certain vehicle or fleet. A campaign with the lowest value is deployed to vehicles before any other campaigns. If it's not specified, 0
is used.
Default: 0
(Optional) A list of information about signals to collect.
" + "documentation":"A list of information about signals to collect.
If you upload a signal as a condition in a data partition for a campaign, then those same signals must be included in signalsToCollect
.
(Optional) A list of vehicle attributes to associate with a campaign.
Enrich the data with specified vehicle attributes. For example, add make
and model
to the campaign, and Amazon Web Services IoT FleetWise will associate the data with those attributes as dimensions in Amazon Timestream. You can then query the data against make
and model
.
Default: An empty array
" + "documentation":"A list of vehicle attributes to associate with a campaign.
Enrich the data with specified vehicle attributes. For example, add make
and model
to the campaign, and Amazon Web Services IoT FleetWise will associate the data with those attributes as dimensions in Amazon Timestream. You can then query the data against make
and model
.
Default: An empty array
" }, "tags":{ "shape":"TagList", @@ -1476,7 +1587,15 @@ }, "dataDestinationConfigs":{ "shape":"DataDestinationConfigs", - "documentation":"The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream.
Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics. Amazon Web Services IoT FleetWise supports at-least-once file delivery to S3. Your vehicle data is stored on multiple Amazon Web Services IoT FleetWise servers for redundancy and high availability.
You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.
" + "documentation":"The destination where the campaign sends data. You can send data to an MQTT topic, or store it in Amazon S3 or Amazon Timestream.
MQTT is the publish/subscribe messaging protocol used by Amazon Web Services IoT to communicate with your devices.
Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics. Amazon Web Services IoT FleetWise supports at-least-once file delivery to S3. Your vehicle data is stored on multiple Amazon Web Services IoT FleetWise servers for redundancy and high availability.
You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.
" + }, + "dataPartitions":{ + "shape":"DataPartitions", + "documentation":"The data partitions associated with the signals collected from the vehicle.
" + }, + "signalsToFetch":{ + "shape":"SignalFetchInformationList", + "documentation":"A list of information about signals to fetch.
" } } }, @@ -1506,7 +1625,7 @@ }, "description":{ "shape":"description", - "documentation":"A brief description of the decoder manifest.
" + "documentation":"A brief description of the decoder manifest.
" }, "modelManifestArn":{ "shape":"arn", @@ -1520,6 +1639,10 @@ "shape":"NetworkInterfaces", "documentation":"A list of information about available network interfaces.
" }, + "defaultForUnmappedSignals":{ + "shape":"DefaultForUnmappedSignalsType", + "documentation":"Use default decoders for all unmapped signals in the model. You don't need to provide any detailed decoding information.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Metadata that can be used to manage the decoder manifest.
" @@ -1671,6 +1794,61 @@ } } }, + "CreateStateTemplateRequest":{ + "type":"structure", + "required":[ + "name", + "signalCatalogArn", + "stateTemplateProperties" + ], + "members":{ + "name":{ + "shape":"resourceName", + "documentation":"The name of the state template.
" + }, + "description":{ + "shape":"description", + "documentation":"A brief description of the state template.
" + }, + "signalCatalogArn":{ + "shape":"arn", + "documentation":"The ARN of the signal catalog associated with the state template.
" + }, + "stateTemplateProperties":{ + "shape":"StateTemplateProperties", + "documentation":"A list of signals from which data is collected. The state template properties contain the fully qualified names of the signals.
" + }, + "dataExtraDimensions":{ + "shape":"StateTemplateDataExtraDimensionNodePathList", + "documentation":"A list of vehicle attributes to associate with the payload published on the state template's MQTT topic. (See Processing last known state vehicle data using MQTT messaging). For example, if you add Vehicle.Attributes.Make
and Vehicle.Attributes.Model
attributes, Amazon Web Services IoT FleetWise will enrich the protobuf encoded payload with those attributes in the extraDimensions
field.
A list of vehicle attributes to associate with user properties of the messages published on the state template's MQTT topic. (See Processing last known state vehicle data using MQTT messaging). For example, if you add Vehicle.Attributes.Make
and Vehicle.Attributes.Model
attributes, Amazon Web Services IoT FleetWise will include these attributes as User Properties with the MQTT message.
Default: An empty array
" + }, + "tags":{ + "shape":"TagList", + "documentation":"Metadata that can be used to manage the state template.
" + } + } + }, + "CreateStateTemplateResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"resourceName", + "documentation":"The name of the state template.
" + }, + "arn":{ + "shape":"arn", + "documentation":"The Amazon Resource Name (ARN) of the state template.
" + }, + "id":{ + "shape":"ResourceUniqueId", + "documentation":"The unique ID of the state template.
" + } + } + }, "CreateVehicleError":{ "type":"structure", "members":{ @@ -1711,7 +1889,7 @@ }, "attributes":{ "shape":"attributesMap", - "documentation":"Static information about a vehicle in a key-value pair. For example: \"engineType\"
: \"1.3 L R2\"
A campaign must include the keys (attribute names) in dataExtraDimensions
for them to display in Amazon Timestream.
Static information about a vehicle in a key-value pair. For example: \"engineType\"
: \"1.3 L R2\"
To use attributes with Campaigns or State Templates, you must include them using the request parameters dataExtraDimensions
and/or metadataExtraDimensions
(for state templates only) when creating your campaign/state template.
Metadata that can be used to manage the vehicle.
" + }, + "stateTemplates":{ + "shape":"StateTemplateAssociations", + "documentation":"Associate state templates with the vehicle. You can monitor the last known state of the vehicle in near real time.
" } } }, @@ -1754,6 +1936,10 @@ "tags":{ "shape":"TagList", "documentation":"Metadata which can be used to manage the vehicle.
" + }, + "stateTemplates":{ + "shape":"StateTemplateAssociations", + "documentation":"Associate state templates to track the state of the vehicle. State templates determine which signal updates the vehicle sends to the cloud.
" } }, "documentation":"Information about the vehicle to create.
" @@ -1793,6 +1979,40 @@ }, "documentation":"Information about a created vehicle.
" }, + "CustomDecodingId":{ + "type":"string", + "max":150, + "min":1, + "pattern":"(?!.*\\.\\.)[a-zA-Z0-9_\\-#:.]+" + }, + "CustomDecodingInterface":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"CustomDecodingSignalInterfaceName", + "documentation":"The name of the interface.
" + } + }, + "documentation":"Represents a custom network interface as defined by the customer.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The ID of the signal.
" + } + }, + "documentation":"Information about signals using a custom decoding protocol as defined by the customer.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The Amazon Timestream table where the campaign sends data.
" + }, + "mqttTopicConfig":{ + "shape":"MqttTopicConfig", + "documentation":"The MQTT topic to which the Amazon Web Services IoT FleetWise campaign routes data.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The destination where the Amazon Web Services IoT FleetWise campaign sends data. You can send data to be stored in Amazon S3 or Amazon Timestream.
", + "documentation":"The destination where the campaign sends data. You can send data to an MQTT topic, or store it in Amazon S3 or Amazon Timestream.
", "union":true }, "DataDestinationConfigs":{ @@ -1889,6 +2113,78 @@ "PARQUET" ] }, + "DataPartition":{ + "type":"structure", + "required":[ + "id", + "storageOptions" + ], + "members":{ + "id":{ + "shape":"DataPartitionId", + "documentation":"The ID of the data partition. The data partition ID must be unique within a campaign. You can establish a data partition as the default partition for a campaign by using default
as the ID.
The storage options for a data partition.
" + }, + "uploadOptions":{ + "shape":"DataPartitionUploadOptions", + "documentation":"The upload options for the data partition.
" + } + }, + "documentation":"The configuration for signal data storage and upload options. You can only specify these options when the campaign's spooling mode is TO_DISK
.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The maximum storage size of the data stored in the data partition.
Newer data overwrites older data when the partition reaches the maximum size.
The folder name for the data partition under the campaign storage folder.
" + }, + "minimumTimeToLive":{ + "shape":"StorageMinimumTimeToLive", + "documentation":"The amount of time that data in this partition will be kept on disk.
After the designated amount of time passes, the data can be removed, but it's not guaranteed to be removed.
Before the time expires, data in this partition can still be deleted if the partition reaches its configured maximum size.
Newer data will overwrite older data when the partition reaches the maximum size.
Size, time, and location options for the data partition.
" + }, + "DataPartitionUploadOptions":{ + "type":"structure", + "required":["expression"], + "members":{ + "expression":{ + "shape":"eventExpression", + "documentation":"The logical expression used to recognize what data to collect. For example, $variable.`Vehicle.OutsideAirTemperature` >= 105.0
.
The version of the condition language. Defaults to the most recent condition language version.
" + } + }, + "documentation":"The upload options for the data partition. If upload options are specified, you must also specify storage options. See DataPartitionStorageOptions.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
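A sketch of what a dataPartitions entry might look like in a CreateCampaign request; the member names inside maximumSize and minimumTimeToLive are assumptions, since those shapes are not shown in this hunk:

```python
# Illustrative only: storage size and TTL member names are assumed.
data_partitions = [
    {
        "id": "default",  # "default" marks this as the campaign's default partition
        "storageOptions": {
            "storageLocation": "telemetry",  # folder under the campaign storage folder
            "maximumSize": {"unit": "MB", "value": 500},          # assumed shape
            "minimumTimeToLive": {"unit": "HOURS", "value": 24},  # assumed shape
        },
        "uploadOptions": {
            # Upload trigger expression, as documented above.
            "expression": "$variable.`Vehicle.OutsideAirTemperature` >= 105.0"
        },
    }
]
```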
The request couldn't be completed because it contains signal decoders with one or more validation errors.
", "exception":true }, + "DefaultForUnmappedSignalsType":{ + "type":"string", + "enum":["CUSTOM_DECODING"] + }, "DeleteCampaignRequest":{ "type":"structure", "required":["name"], @@ -2074,6 +2374,33 @@ } } }, + "DeleteStateTemplateRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"ResourceIdentifier", + "documentation":"A unique, service-generated identifier.
" + } + } + }, + "DeleteStateTemplateResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"resourceName", + "documentation":"The name of the state template.
" + }, + "arn":{ + "shape":"arn", + "documentation":"The Amazon Resource Name (ARN) of the state template.
" + }, + "id":{ + "shape":"ResourceUniqueId", + "documentation":"The unique ID of the state template.
" + } + } + }, "DeleteVehicleRequest":{ "type":"structure", "required":["vehicleName"], @@ -2145,6 +2472,12 @@ "FLEETWISE_DEFAULT_ENCRYPTION" ] }, + "EventExpressionList":{ + "type":"list", + "member":{"shape":"actionEventExpression"}, + "max":2, + "min":1 + }, "FleetSummary":{ "type":"structure", "required":[ @@ -2290,7 +2623,15 @@ }, "dataDestinationConfigs":{ "shape":"DataDestinationConfigs", - "documentation":"The destination where the campaign sends data. You can choose to send data to be stored in Amazon S3 or Amazon Timestream.
Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics.
You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.
" + "documentation":"The destination where the campaign sends data. You can send data to an MQTT topic, or store it in Amazon S3 or Amazon Timestream.
MQTT is the publish/subscribe messaging protocol used by Amazon Web Services IoT to communicate with your devices.
Amazon S3 optimizes the cost of data storage and provides additional mechanisms to use vehicle data, such as data lakes, centralized data storage, data processing pipelines, and analytics.
You can use Amazon Timestream to access and analyze time series data, and Timestream to query vehicle data so that you can identify trends and patterns.
" + }, + "dataPartitions":{ + "shape":"DataPartitions", + "documentation":"The data partitions associated with the signals collected from the vehicle.
" + }, + "signalsToFetch":{ + "shape":"SignalFetchInformationList", + "documentation":"Information about a list of signals to fetch data from.
" } } }, @@ -2581,6 +2922,61 @@ } } }, + "GetStateTemplateRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"ResourceIdentifier", + "documentation":"A unique, service-generated identifier.
" + } + } + }, + "GetStateTemplateResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"resourceName", + "documentation":"The name of the state template.
" + }, + "arn":{ + "shape":"arn", + "documentation":"The Amazon Resource Name (ARN) of the state template.
" + }, + "description":{ + "shape":"description", + "documentation":"A brief description of the state template.
" + }, + "signalCatalogArn":{ + "shape":"arn", + "documentation":"The ARN of the signal catalog associated with the state template.
" + }, + "stateTemplateProperties":{ + "shape":"StateTemplateProperties", + "documentation":"A list of signals from which data is collected. The state template properties contain the fully qualified names of the signals.
" + }, + "dataExtraDimensions":{ + "shape":"StateTemplateDataExtraDimensionNodePathList", + "documentation":"A list of vehicle attributes associated with the payload published on the state template's MQTT topic.
Default: An empty array
" + }, + "metadataExtraDimensions":{ + "shape":"StateTemplateMetadataExtraDimensionNodePathList", + "documentation":"A list of vehicle attributes to associate with user properties of the messages published on the state template's MQTT topic.
Default: An empty array
" + }, + "creationTime":{ + "shape":"timestamp", + "documentation":"The time the state template was created in seconds since epoch (January 1, 1970 at midnight UTC time).
" + }, + "lastModificationTime":{ + "shape":"timestamp", + "documentation":"The time the state template was last updated in seconds since epoch (January 1, 1970 at midnight UTC time).
" + }, + "id":{ + "shape":"ResourceUniqueId", + "documentation":"The unique ID of the state template.
" + } + } + }, "GetVehicleRequest":{ "type":"structure", "required":["vehicleName"], @@ -2614,6 +3010,10 @@ "shape":"attributesMap", "documentation":"Static information about a vehicle in a key-value pair. For example:
\"engineType\"
: \"1.3 L R2\"
State templates associated with the vehicle.
" + }, "creationTime":{ "shape":"timestamp", "documentation":"The time the vehicle was created in seconds since epoch (January 1, 1970 at midnight UTC time).
" @@ -2630,11 +3030,11 @@ "members":{ "nextToken":{ "shape":"nextToken", - "documentation":"A pagination token for the next set of results.
If the results of a search are large, only a portion of the results are returned, and a nextToken
pagination token is returned in the response. To retrieve the next set of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
A pagination token for the next set of results.
If the results of a search are large, only a portion of the results are returned, and a nextToken
pagination token is returned in the response. To retrieve the next set of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value. This parameter is only supported for resources of type CAMPAIGN
.
The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive. This parameter is only supported for resources of type CAMPAIGN
.
The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" }, "status":{ "shape":"statusStr", - "documentation":"Optional parameter to filter the results by the status of each created campaign in your account. The status can be one of: CREATING
, WAITING_FOR_APPROVAL
, RUNNING
, or SUSPENDED
.
An optional parameter to filter the results by the status of each created campaign in your account. The status can be one of: CREATING
, WAITING_FOR_APPROVAL
, RUNNING
, or SUSPENDED
.
The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -2974,7 +3374,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3004,7 +3404,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3035,7 +3435,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3061,7 +3461,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3092,7 +3492,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3122,7 +3522,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3153,7 +3553,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" }, "signalNodeType":{ "shape":"SignalNodeType", @@ -3183,7 +3583,7 @@ }, "maxResults":{ "shape":"maxResults", - "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3200,6 +3600,32 @@ } } }, + "ListStateTemplatesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"nextToken", + "documentation":" The token to retrieve the next set of results, or null
if there are no more results.
The maximum number of items to return, between 1 and 100, inclusive.
" + } + } + }, + "ListStateTemplatesResponse":{ + "type":"structure", + "members":{ + "summaries":{ + "shape":"StateTemplateSummaries", + "documentation":"A list of information about each state template.
" + }, + "nextToken":{ + "shape":"nextToken", + "documentation":" The token to retrieve the next set of results, or null
if there are no more results.
The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3259,11 +3685,11 @@ }, "attributeNames":{ "shape":"attributeNamesList", - "documentation":"The fully qualified names of the attributes. For example, the fully qualified name of an attribute might be Vehicle.Body.Engine.Type
.
The fully qualified names of the attributes. You can use this optional parameter to list the vehicles containing all the attributes in the request. For example, attributeNames
could be \"Vehicle.Body.Engine.Type, Vehicle.Color
\" and the corresponding attributeValues
could be \"1.3 L R2, Blue
\" . In this case, the API will filter vehicles with an attribute name Vehicle.Body.Engine.Type
that contains a value of 1.3 L R2
AND an attribute name Vehicle.Color
that contains a value of \"Blue
\". A request must contain unique values for the attributeNames
filter and the matching number of attributeValues
filters to return the subset of vehicles that match the attributes filter condition.
Static information about a vehicle attribute value in string format. For example:
\"1.3 L R2\"
Static information about a vehicle attribute value in string format. You can use this optional parameter in conjunction with attributeNames
to list the vehicles containing all the attributeValues
corresponding to the attributeNames
filter. For example, attributeValues
could be \"1.3 L R2, Blue
\" and the corresponding attributeNames
filter could be \"Vehicle.Body.Engine.Type, Vehicle.Color
\". In this case, the API will filter vehicles with attribute name Vehicle.Body.Engine.Type
that contains a value of 1.3 L R2
AND an attribute name Vehicle.Color
that contains a value of \"Blue
\". A request must contain unique values for the attributeNames
filter and the matching number of attributeValues
filter to return the subset of vehicles that match the attributes filter condition.
The maximum number of items to return, between 1 and 100, inclusive.
" + "documentation":"The maximum number of items to return, between 1 and 100, inclusive.
" } } }, @@ -3365,6 +3791,30 @@ "key":{"shape":"string"}, "value":{"shape":"string"} }, + "MqttTopicArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:.*" + }, + "MqttTopicConfig":{ + "type":"structure", + "required":[ + "mqttTopicArn", + "executionRoleArn" + ], + "members":{ + "mqttTopicArn":{ + "shape":"MqttTopicArn", + "documentation":"The ARN of the MQTT topic.
" + }, + "executionRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"The ARN of the role that grants Amazon Web Services IoT FleetWise permission to access and act on messages sent to the MQTT topic.
" + } + }, + "documentation":"The MQTT topic to which the Amazon Web Services IoT FleetWise campaign routes data. For more information, see Device communication protocols in the Amazon Web Services IoT Core Developer Guide.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
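As a minimal sketch of how a campaign could route collected data to such an MQTT topic — assuming create_campaign accepts a dataDestinationConfigs list containing the mqttTopicConfig shape above (the ARNs, names, and collection scheme are placeholders):

import boto3

fleetwise = boto3.client("iotfleetwise")

fleetwise.create_campaign(
    name="engine-telemetry",  # hypothetical campaign name
    signalCatalogArn="arn:aws:iotfleetwise:us-east-1:123456789012:signal-catalog/main",
    targetArn="arn:aws:iotfleetwise:us-east-1:123456789012:fleet/demo-fleet",
    collectionScheme={"timeBasedCollectionScheme": {"periodMs": 10000}},
    dataDestinationConfigs=[
        {
            "mqttTopicConfig": {
                # ARN of the IoT Core MQTT topic that receives campaign data
                "mqttTopicArn": "arn:aws:iot:us-east-1:123456789012:topic/fleetwise/processed",
                # Role that grants FleetWise permission to publish to the topic
                "executionRoleArn": "arn:aws:iam::123456789012:role/FleetWiseMqttRole",
            }
        }
    ],
)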
" }, - "documentation":"Information about a network interface specified by the On-board diagnostic (OBD) II protocol.
" + "documentation":"Information about a network interface specified by the on-board diagnostic (OBD) II protocol.
" }, "vehicleMiddleware":{ "shape":"VehicleMiddleware", "documentation":"The vehicle middleware defined as a type of network interface. Examples of vehicle middleware include ROS2
and SOME/IP
.
Information about a custom network interface.
" } }, "documentation":"Represents a node and its specifications in an in-vehicle communication network. All signal decoders must be associated with a network node.
To return this information about all the network interfaces specified in a decoder manifest, use the API operation.
" @@ -3430,7 +3884,8 @@ "CAN_NETWORK_INTERFACE_INFO_IS_NULL", "OBD_NETWORK_INTERFACE_INFO_IS_NULL", "NETWORK_INTERFACE_TO_REMOVE_ASSOCIATED_WITH_SIGNALS", - "VEHICLE_MIDDLEWARE_NETWORK_INTERFACE_INFO_IS_NULL" + "VEHICLE_MIDDLEWARE_NETWORK_INTERFACE_INFO_IS_NULL", + "CUSTOM_DECODING_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL" ] }, "NetworkInterfaceType":{ @@ -3438,7 +3893,8 @@ "enum":[ "CAN_INTERFACE", "OBD_INTERFACE", - "VEHICLE_MIDDLEWARE" + "VEHICLE_MIDDLEWARE", + "CUSTOM_DECODING_INTERFACE" ] }, "NetworkInterfaces":{ @@ -3616,7 +4072,7 @@ "documentation":"Whether the vehicle has a transmission control module (TCM).
" } }, - "documentation":"A network interface that specifies the On-board diagnostic (OBD) II network protocol.
" + "documentation":"A network interface that specifies the on-board diagnostic (OBD) II network protocol.
" }, "ObdInterfaceName":{ "type":"string", @@ -3679,6 +4135,20 @@ "max":50, "min":1 }, + "OnChangeStateTemplateUpdateStrategy":{ + "type":"structure", + "members":{ + }, + "documentation":"Vehicles associated with the state template will stream telemetry data when there is a change.
" + }, + "PeriodicStateTemplateUpdateStrategy":{ + "type":"structure", + "required":["stateTemplateUpdateRate"], + "members":{ + "stateTemplateUpdateRate":{"shape":"TimePeriod"} + }, + "documentation":"Vehicles associated with the state template will stream telemetry data during a specified time period.
" + }, "Prefix":{ "type":"string", "max":512, @@ -3863,6 +4333,12 @@ "REGISTRATION_FAILURE" ] }, + "ResourceIdentifier":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z\\d\\-_:]+" + }, "ResourceNotFoundException":{ "type":"structure", "required":[ @@ -3884,6 +4360,12 @@ "documentation":"The resource wasn't found.
", "exception":true }, + "ResourceUniqueId":{ + "type":"string", + "max":26, + "min":26, + "pattern":"[A-Z0-9]+" + }, "RetryAfterSeconds":{"type":"integer"}, "S3BucketArn":{ "type":"string", @@ -3909,7 +4391,7 @@ }, "prefix":{ "shape":"Prefix", - "documentation":"(Optional) Enter an S3 bucket prefix. The prefix is the string of characters after the bucket name and before the object name. You can use the prefix to organize data stored in Amazon S3 buckets. For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide.
By default, Amazon Web Services IoT FleetWise sets the prefix processed-data/year=YY/month=MM/date=DD/hour=HH/
(in UTC) to data it delivers to Amazon S3. You can enter a prefix to append it to this default prefix. For example, if you enter the prefix vehicles
, the prefix will be vehicles/processed-data/year=YY/month=MM/date=DD/hour=HH/
."
+ "documentation":"Enter an S3 bucket prefix. The prefix is the string of characters after the bucket name and before the object name. You can use the prefix to organize data stored in Amazon S3 buckets. For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide. By default, Amazon Web Services IoT FleetWise sets the prefix processed-data/year=YY/month=MM/date=DD/hour=HH/ (in UTC) to data it delivers to Amazon S3. You can enter a prefix to append it to this default prefix. For example, if you enter the prefix vehicles, the prefix will be vehicles/processed-data/year=YY/month=MM/date=DD/hour=HH/." },
"documentation":"
The Amazon S3 bucket where the Amazon Web Services IoT FleetWise campaign sends data. Amazon S3 is an object storage service that stores data as objects within buckets. For more information, see Creating, configuring, and working with Amazon S3 buckets in the Amazon Simple Storage Service User Guide.
" @@ -4012,11 +4494,15 @@ }, "obdSignal":{ "shape":"ObdSignal", - "documentation":"Information about signal decoder using the On-board diagnostic (OBD) II protocol.
" + "documentation":"Information about signal decoder using the on-board diagnostic (OBD) II protocol.
" }, "messageSignal":{ "shape":"MessageSignal", "documentation":"The decoding information for a specific message which supports higher order data types.
" + }, + "customDecodingSignal":{ + "shape":"CustomDecodingSignal", + "documentation":"Information about a custom signal decoder.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Information about a signal decoder.
" @@ -4038,7 +4524,8 @@ "STRUCT_SIZE_MISMATCH", "NO_SIGNAL_IN_CATALOG_FOR_DECODER_SIGNAL", "SIGNAL_DECODER_INCOMPATIBLE_WITH_SIGNAL_CATALOG", - "EMPTY_MESSAGE_SIGNAL" + "EMPTY_MESSAGE_SIGNAL", + "CUSTOM_DECODING_SIGNAL_INFO_IS_NULL" ] }, "SignalDecoderType":{ @@ -4046,7 +4533,8 @@ "enum":[ "CAN_SIGNAL", "OBD_SIGNAL", - "MESSAGE_SIGNAL" + "MESSAGE_SIGNAL", + "CUSTOM_DECODING_SIGNAL" ] }, "SignalDecoders":{ @@ -4055,6 +4543,54 @@ "max":500, "min":1 }, + "SignalFetchConfig":{ + "type":"structure", + "members":{ + "timeBased":{ + "shape":"TimeBasedSignalFetchConfig", + "documentation":"The configuration of a time-based signal fetch operation.
" + }, + "conditionBased":{ + "shape":"ConditionBasedSignalFetchConfig", + "documentation":"The configuration of a condition-based signal fetch operation.
" + } + }, + "documentation":"The configuration of the signal fetch operation.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The fully qualified name of the signal to be fetched.
" + }, + "signalFetchConfig":{ + "shape":"SignalFetchConfig", + "documentation":"The configuration of the signal fetch operation.
" + }, + "conditionLanguageVersion":{ + "shape":"languageVersion", + "documentation":"The version of the condition language used.
" + }, + "actions":{ + "shape":"EventExpressionList", + "documentation":"The actions to be performed by the signal fetch.
" + } + }, + "documentation":"Information about the signal to be fetched.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The minimum duration of time (in milliseconds) between two triggering events to collect data.
If a signal changes often, you might want to collect data at a slower rate.
The ID of the data partition this signal is associated with.
The ID must match one of the IDs provided in dataPartitions
. This is accomplished either by specifying a particular data partition ID or by using default
for an established default partition. You can establish a default partition in the DataPartition
data type.
If you upload a signal as a condition for a campaign's data partition, the same signal must be included in signalsToCollect
.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Information about a signal.
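A minimal sketch of a SignalFetchInformation entry built from the fetch shapes above; the signal name, frequency, and action expression are illustrative, and passing such entries to a campaign through a signalsToFetch parameter is an assumption based on these shapes:

# Fetch a signal every 5 seconds and run an action expression on it.
signal_fetch = {
    "fullyQualifiedName": "Vehicle.ECU1.Actuator1",  # signal to fetch (placeholder)
    "signalFetchConfig": {
        # Time-based fetch: executionFrequencyMs is a positive long in milliseconds
        "timeBased": {"executionFrequencyMs": 5000}
    },
    "conditionLanguageVersion": 1,
    # actionEventExpression strings evaluated when the fetch runs
    "actions": ['custom_function("Vehicle.ECU1.Actuator1")'],
}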
" @@ -4099,6 +4639,98 @@ "TO_DISK" ] }, + "StateTemplateAssociation":{ + "type":"structure", + "required":[ + "identifier", + "stateTemplateUpdateStrategy" + ], + "members":{ + "identifier":{ + "shape":"ResourceIdentifier", + "documentation":"A unique, service-generated identifier.
" + }, + "stateTemplateUpdateStrategy":{"shape":"StateTemplateUpdateStrategy"} + }, + "documentation":"The state template associated with a vehicle. State templates contain state properties, which are signals that belong to a signal catalog that is synchronized between the Amazon Web Services IoT FleetWise Edge and the Amazon Web Services Cloud.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The name of the state template.
" + }, + "arn":{ + "shape":"arn", + "documentation":"The Amazon Resource Name (ARN) of the state template.
" + }, + "signalCatalogArn":{ + "shape":"arn", + "documentation":"The Amazon Resource Name (ARN) of the signal catalog associated with the state template.
" + }, + "description":{ + "shape":"description", + "documentation":"A brief description of the state template.
" + }, + "creationTime":{ + "shape":"timestamp", + "documentation":"The time the state template was created, in seconds since epoch (January 1, 1970 at midnight UTC time).
" + }, + "lastModificationTime":{ + "shape":"timestamp", + "documentation":"The time the state template was last updated, in seconds since epoch (January 1, 1970 at midnight UTC time).
" + }, + "id":{ + "shape":"ResourceUniqueId", + "documentation":"The unique ID of the state template.
" + } + }, + "documentation":"Information about a state template.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The update strategy for the state template. Vehicles associated with the state template can stream telemetry data with either an onChange
or periodic
update strategy.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
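As a minimal sketch of associating a state template with a vehicle, assuming update_vehicle accepts the stateTemplatesToAdd member shown later in this model and that the update strategy uses onChange/periodic keys as the documentation above describes (the identifier and vehicle name are placeholders):

import boto3

fleetwise = boto3.client("iotfleetwise")

fleetwise.update_vehicle(
    vehicleName="demo-vehicle-1",
    stateTemplatesToAdd=[
        {
            "identifier": "ST0123456789ABCDEFGHIJKLMN",  # hypothetical state template ID
            "stateTemplateUpdateStrategy": {
                # Stream the tracked signals once per minute, using the TimePeriod shape
                "periodic": {"stateTemplateUpdateRate": {"unit": "MINUTE", "value": 1}}
            },
        }
    ],
)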
The data type of the data to store.
" + }, + "value":{ + "shape":"StorageMaximumSizeValue", + "documentation":"The maximum amount of time to store data.
" + } + }, + "documentation":"The maximum storage size for the data partition.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
The time increment type.
" + }, + "value":{ + "shape":"StorageMinimumTimeToLiveValue", + "documentation":"The minimum amount of time to store the data.
" + } + }, + "documentation":"Information about the minimum amount of time that data will be kept.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
Information about a collection scheme that uses a time period to decide how often to collect data.
" }, + "TimeBasedSignalFetchConfig":{ + "type":"structure", + "required":["executionFrequencyMs"], + "members":{ + "executionFrequencyMs":{ + "shape":"positiveLong", + "documentation":"The frequency with which the signal fetch will be executed.
" + } + }, + "documentation":"Used to configure a frequency-based vehicle signal fetch.
" + }, + "TimePeriod":{ + "type":"structure", + "required":[ + "unit", + "value" + ], + "members":{ + "unit":{ + "shape":"TimeUnit", + "documentation":"A unit of time.
" + }, + "value":{ + "shape":"positiveInteger", + "documentation":"A number of time units.
" + } + }, + "documentation":"The length of time between state template updates.
" + }, + "TimeUnit":{ + "type":"string", + "enum":[ + "MILLISECOND", + "SECOND", + "MINUTE", + "HOUR" + ] + }, "TimestreamConfig":{ "type":"structure", "required":[ @@ -4498,6 +5238,10 @@ "status":{ "shape":"ManifestStatus", "documentation":" The state of the decoder manifest. If the status is ACTIVE
, the decoder manifest can't be edited. If the status is DRAFT
, you can edit the decoder manifest.
Use default decoders for all unmapped signals in the model. You don't need to provide any detailed decoding information.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
A unique, service-generated identifier.
" + }, + "description":{ + "shape":"description", + "documentation":"A brief description of the state template.
" + }, + "stateTemplatePropertiesToAdd":{ + "shape":"StateTemplateProperties", + "documentation":"Add signals from which data is collected as part of the state template.
" + }, + "stateTemplatePropertiesToRemove":{ + "shape":"StateTemplateProperties", + "documentation":"Remove signals from which data is collected as part of the state template.
" + }, + "dataExtraDimensions":{ + "shape":"StateTemplateDataExtraDimensionNodePathList", + "documentation":"A list of vehicle attributes to associate with the payload published on the state template's MQTT topic. (See Processing last known state vehicle data using MQTT messaging). For example, if you add Vehicle.Attributes.Make
and Vehicle.Attributes.Model
attributes, Amazon Web Services IoT FleetWise will enrich the protobuf encoded payload with those attributes in the extraDimensions
field.
Default: An empty array
" + }, + "metadataExtraDimensions":{ + "shape":"StateTemplateMetadataExtraDimensionNodePathList", + "documentation":"A list of vehicle attributes to associate with user properties of the messages published on the state template's MQTT topic. (See Processing last known state vehicle data using MQTT messaging). For example, if you add Vehicle.Attributes.Make
and Vehicle.Attributes.Model
attributes, Amazon Web Services IoT FleetWise will include these attributes as User Properties with the MQTT message.
The name of the state template.
" + }, + "arn":{ + "shape":"arn", + "documentation":"The Amazon Resource Name (ARN) of the state template.
" + }, + "id":{ + "shape":"ResourceUniqueId", + "documentation":"The unique ID of the state template.
" + } + } + }, "UpdateVehicleError":{ "type":"structure", "members":{ @@ -4679,6 +5470,14 @@ "attributeUpdateMode":{ "shape":"UpdateMode", "documentation":"The method the specified attributes will update the existing attributes on the vehicle. UseOverwite
to replace the vehicle attributes with the specified attributes. Or use Merge
to combine all attributes.
This is required if attributes are present in the input.
" + }, + "stateTemplatesToAdd":{ + "shape":"StateTemplateAssociations", + "documentation":"Associate state templates with the vehicle.
" + }, + "stateTemplatesToRemove":{ + "shape":"StateTemplateAssociationIdentifiers", + "documentation":"Remove state templates from the vehicle.
" } } }, @@ -4705,6 +5504,14 @@ "attributeUpdateMode":{ "shape":"UpdateMode", "documentation":"The method the specified attributes will update the existing attributes on the vehicle. UseOverwite
to replace the vehicle attributes with the specified attributes. Or use Merge
to combine all attributes.
This is required if attributes are present in the input.
" + }, + "stateTemplatesToAdd":{ + "shape":"StateTemplateAssociations", + "documentation":"Associate additional state templates to track the state of the vehicle. State templates determine which signal updates the vehicle sends to the cloud.
" + }, + "stateTemplatesToRemove":{ + "shape":"StateTemplateAssociationIdentifiers", + "documentation":"Remove existing state template associations from the vehicle.
" } }, "documentation":"Information about the vehicle to update.
" @@ -4841,10 +5648,10 @@ }, "status":{ "shape":"VehicleState", - "documentation":"The state of a vehicle, which can be one of the following:
CREATED
- Amazon Web Services IoT FleetWise successfully created the vehicle.
READY
- The vehicle is ready to receive a campaign deployment.
HEALTHY
- A campaign deployment was delivered to the vehicle.
SUSPENDED
- A campaign associated with the vehicle was suspended and data collection was paused.
DELETING
- Amazon Web Services IoT FleetWise is removing a campaign from the vehicle.
" + "documentation":"The status of a campaign, which can be one of the following:
CREATED
- The campaign has been created successfully but has not been approved.
READY
- The campaign has been approved but has not been deployed to the vehicle.
HEALTHY
- The campaign has been deployed to the vehicle.
SUSPENDED
- The campaign has been suspended and data collection is paused.
DELETING
- The campaign is being removed from the vehicle.
" }, - "documentation":"Information about the state of a vehicle and how it relates to the status of a campaign.
" + "documentation":"Information about a campaign associated with a vehicle.
" }, "VehicleStatusList":{ "type":"list", @@ -4892,6 +5699,12 @@ }, "documentation":"Information about a vehicle.
To return this information about vehicles in your account, you can use the API operation.
" }, + "actionEventExpression":{ + "type":"string", + "max":100, + "min":1, + "sensitive":true + }, "arn":{"type":"string"}, "attributeName":{ "type":"string", @@ -4973,6 +5786,12 @@ "min":1, "sensitive":true }, + "fetchConfigEventExpression":{ + "type":"string", + "max":400, + "min":1, + "sensitive":true + }, "fleetId":{ "type":"string", "max":100, @@ -5040,6 +5859,11 @@ "box":true, "min":1 }, + "positiveLong":{ + "type":"long", + "box":true, + "min":1 + }, "priority":{ "type":"integer", "box":true, @@ -5104,5 +5928,5 @@ "pattern":"[\\w|*|-]+(\\.[\\w|*|-]+)*" } }, - "documentation":"Amazon Web Services IoT FleetWise is a fully managed service that you can use to collect, model, and transfer vehicle data to the Amazon Web Services cloud at scale. With Amazon Web Services IoT FleetWise, you can standardize all of your vehicle data models, independent of the in-vehicle communication architecture, and define data collection rules to transfer only high-value data to the cloud.
For more information, see What is Amazon Web Services IoT FleetWise? in the Amazon Web Services IoT FleetWise Developer Guide.
" + "documentation":"Amazon Web Services IoT FleetWise is a fully managed service that you can use to collect, model, and transfer vehicle data to the Amazon Web Services cloud at scale. With Amazon Web Services IoT FleetWise, you can standardize all of your vehicle data models, independent of the in-vehicle communication architecture, and define data collection rules to transfer only high-value data to the cloud.
For more information, see What is Amazon Web Services IoT FleetWise? in the Amazon Web Services IoT FleetWise Developer Guide.
Access to certain Amazon Web Services IoT FleetWise features is currently gated. For more information, see Amazon Web Services Region and feature availability in the Amazon Web Services IoT FleetWise Developer Guide.
" }
diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json
- "documentation":"Creates a mapping between an event source and a Lambda function. Lambda reads items from the event source and invokes the function.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for stream sources (DynamoDB and Kinesis):
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
DestinationConfig
– Send discarded records to an Amazon SQS queue or Amazon SNS topic.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For information about which configuration parameters apply to each event source, see the following topics.
" + "documentation":"Creates a mapping between an event source and a Lambda function. Lambda reads items from the event source and invokes the function.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for DynamoDB and Kinesis event sources:
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:
DestinationConfig
– Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket.
For information about which configuration parameters apply to each event source, see the following topics.
" - "documentation":"Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.
By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.
To send an invocation record to a queue, topic, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.
" + "documentation":"Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.
By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.
To send an invocation record to a queue, topic, S3 bucket, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
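A minimal sketch of retaining failed asynchronous invocation records in S3, per the destination notes above. Because S3 is valid only for on-failure destinations, the on-success record goes to an SQS queue here; all ARNs and names are placeholders:

import boto3

lambda_client = boto3.client("lambda")

lambda_client.put_function_event_invoke_config(
    FunctionName="my-function",  # hypothetical function name
    MaximumRetryAttempts=1,
    DestinationConfig={
        # Successful invocation records: SQS queue (S3 is not allowed here)
        "OnSuccess": {"Destination": "arn:aws:sqs:us-east-1:123456789012:success-queue"},
        # Failed invocation records: S3 bucket (new in this release)
        "OnFailure": {"Destination": "arn:aws:s3:::my-failed-invocations-bucket"},
    },
)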
" - "documentation":"Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for stream sources (DynamoDB and Kinesis):
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
DestinationConfig
– Send discarded records to an Amazon SQS queue or Amazon SNS topic.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For information about which configuration parameters apply to each event source, see the following topics.
" + "documentation":"Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for DynamoDB and Kinesis event sources:
BisectBatchOnFunctionError
– If the function returns an error, split the batch in two and retry.
MaximumRecordAgeInSeconds
– Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts
– Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
– Process multiple batches from each shard concurrently.
For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:
DestinationConfig
– Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket.
For information about which configuration parameters apply to each event source, see the following topics.
The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.
" + }, + "MetricsConfig":{ + "shape":"EventSourceMappingMetricsConfig", + "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" } } }, @@ -2589,10 +2593,34 @@ "EventSourceMappingArn":{ "shape":"EventSourceMappingArn", "documentation":"The Amazon Resource Name (ARN) of the event source mapping.
" + }, + "MetricsConfig":{ + "shape":"EventSourceMappingMetricsConfig", + "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" } }, "documentation":"A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.
" }, + "EventSourceMappingMetric":{ + "type":"string", + "enum":["EventCount"] + }, + "EventSourceMappingMetricList":{ + "type":"list", + "member":{"shape":"EventSourceMappingMetric"}, + "max":1, + "min":0 + }, + "EventSourceMappingMetricsConfig":{ + "type":"structure", + "members":{ + "Metrics":{ + "shape":"EventSourceMappingMetricList", + "documentation":" The metrics you want your event source mapping to produce. Include EventCount
to receive event source mapping metrics related to the number of events processed by your event source mapping. For more information about these metrics, see Event source mapping metrics.
" } }, "documentation":"The metrics configuration for your event source. Use this configuration object to define which metrics you want your event source mapping to produce.
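A minimal sketch of enabling these metrics on an existing mapping; EventCount is the only metric listed in this model, and the UUID is a placeholder:

import boto3

lambda_client = boto3.client("lambda")

# Turn on event source mapping metrics for an existing mapping.
lambda_client.update_event_source_mapping(
    UUID="14e0db71-xxxx-xxxx-xxxx-000000000000",  # placeholder mapping UUID
    MetricsConfig={"Metrics": ["EventCount"]},
)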
" + }, "EventSourceMappingsList":{ "type":"list", "member":{"shape":"EventSourceMappingConfiguration"} @@ -2931,7 +2959,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
The Amazon Resource Name (ARN) of the destination resource.
To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.
To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.
To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
" + "documentation":"The Amazon Resource Name (ARN) of the destination resource.
To retain records of unsuccessful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.
To retain records of failed invocations from Kinesis, DynamoDB, self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
" } }, "documentation":"A destination for events that failed processing.
" @@ -4778,7 +4806,7 @@ "documentation":"The Amazon Resource Name (ARN) of the destination resource.
" } }, - "documentation":"A destination for events that were processed successfully.
" + "documentation":"A destination for events that were processed successfully.
To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.
" }, "OrganizationId":{ "type":"string", @@ -5086,7 +5114,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.
" + }, + "MetricsConfig":{ + "shape":"EventSourceMappingMetricsConfig", + "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" } } }, @@ -6243,7 +6275,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
Overview
Lambda is a compute service that lets you run code without provisioning or managing servers. Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging. With Lambda, you can run code for virtually any type of application or backend service. For more information about the Lambda service, see What is Lambda in the Lambda Developer Guide.
The Lambda API Reference provides information about each of the API methods, including details about the parameters in each API request and response.
You can use Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools to access the API. For installation instructions, see Tools for Amazon Web Services.
For a list of Region-specific endpoints that Lambda supports, see Lambda endpoints and quotas in the Amazon Web Services General Reference..
When making the API calls, you will need to authenticate your request by providing a signature. Lambda supports signature version 4. For more information, see Signature Version 4 signing process in the Amazon Web Services General Reference..
CA certificates
Because Amazon Web Services SDKs use the CA certificates from your computer, changes to the certificates on the Amazon Web Services servers can cause connection failures when you attempt to use an SDK. You can prevent these failures by keeping your computer's CA certificates and operating system up-to-date. If you encounter this issue in a corporate environment and do not manage your own computer, you might need to ask an administrator to assist with the update process. The following list shows minimum operating system and Java versions:
Microsoft Windows versions that have updates from January 2005 or later installed contain at least one of the required CAs in their trust list.
Mac OS X 10.4 with Java for Mac OS X 10.4 Release 5 (February 2007), Mac OS X 10.5 (October 2007), and later versions contain at least one of the required CAs in their trust list.
Red Hat Enterprise Linux 5 (March 2007), 6, and 7 and CentOS 5, 6, and 7 all contain at least one of the required CAs in their default trusted CA list.
Java 1.4.2_12 (May 2006), 5 Update 2 (March 2005), and all later versions, including Java 6 (December 2006), 7, and 8, contain at least one of the required CAs in their default trusted CA list.
When accessing the Lambda management console or Lambda API endpoints, whether through browsers or programmatically, you will need to ensure your client machines support any of the following CAs:
Amazon Root CA 1
Starfield Services Root Certificate Authority - G2
Starfield Class 2 Certification Authority
Root certificates from the first two authorities are available from Amazon trust services, but keeping your computer up-to-date is the more straightforward solution. To learn more about ACM-provided certificates, see Amazon Web Services Certificate Manager FAQs.
" + "documentation":"Overview
Lambda is a compute service that lets you run code without provisioning or managing servers. Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging. With Lambda, you can run code for virtually any type of application or backend service. For more information about the Lambda service, see What is Lambda in the Lambda Developer Guide.
The Lambda API Reference provides information about each of the API methods, including details about the parameters in each API request and response.
You can use Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools to access the API. For installation instructions, see Tools for Amazon Web Services.
For a list of Region-specific endpoints that Lambda supports, see Lambda endpoints and quotas in the Amazon Web Services General Reference..
When making the API calls, you will need to authenticate your request by providing a signature. Lambda supports signature version 4. For more information, see Signature Version 4 signing process in the Amazon Web Services General Reference..
CA certificates
Because Amazon Web Services SDKs use the CA certificates from your computer, changes to the certificates on the Amazon Web Services servers can cause connection failures when you attempt to use an SDK. You can prevent these failures by keeping your computer's CA certificates and operating system up-to-date. If you encounter this issue in a corporate environment and do not manage your own computer, you might need to ask an administrator to assist with the update process. The following list shows minimum operating system and Java versions:
Microsoft Windows versions that have updates from January 2005 or later installed contain at least one of the required CAs in their trust list.
Mac OS X 10.4 with Java for Mac OS X 10.4 Release 5 (February 2007), Mac OS X 10.5 (October 2007), and later versions contain at least one of the required CAs in their trust list.
Red Hat Enterprise Linux 5 (March 2007), 6, and 7 and CentOS 5, 6, and 7 all contain at least one of the required CAs in their default trusted CA list.
Java 1.4.2_12 (May 2006), 5 Update 2 (March 2005), and all later versions, including Java 6 (December 2006), 7, and 8, contain at least one of the required CAs in their default trusted CA list.
When accessing the Lambda management console or Lambda API endpoints, whether through browsers or programmatically, you will need to ensure your client machines support any of the following CAs:
Amazon Root CA 1
Starfield Services Root Certificate Authority - G2
Starfield Class 2 Certification Authority
Root certificates from the first two authorities are available from Amazon trust services, but keeping your computer up-to-date is the more straightforward solution. To learn more about ACM-provided certificates, see Amazon Web Services Certificate Manager FAQs.
" } diff --git a/botocore/data/logs/2014-03-28/paginators-1.json b/botocore/data/logs/2014-03-28/paginators-1.json index e62c55bcc5..3f07c72559 100644 --- a/botocore/data/logs/2014-03-28/paginators-1.json +++ b/botocore/data/logs/2014-03-28/paginators-1.json @@ -92,6 +92,12 @@ "limit_key": "limit", "output_token": "nextToken", "result_key": "configurationTemplates" + }, + "ListLogGroupsForQuery": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "logGroupIdentifiers" } } } diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index 0dfaba30f7..7e7a364a07 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -142,7 +142,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"OperationAbortedException"} ], - "documentation":"Deletes a CloudWatch Logs account policy. This stops the policy from applying to all log groups or a subset of log groups in the account. Log-group level policies will still be in effect.
To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are deleting.
To delete a data protection policy, you must have the logs:DeleteDataProtectionPolicy
and logs:DeleteAccountPolicy
permissions.
To delete a subscription filter policy, you must have the logs:DeleteSubscriptionFilter
and logs:DeleteAccountPolicy
permissions.
Deletes a CloudWatch Logs account policy. This stops the account-wide policy from applying to log groups in the account. If you delete a data protection policy or subscription filter policy, any log-group level policies of those types remain in effect.
To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are deleting.
To delete a data protection policy, you must have the logs:DeleteDataProtectionPolicy
and logs:DeleteAccountPolicy
permissions.
To delete a subscription filter policy, you must have the logs:DeleteSubscriptionFilter
and logs:DeleteAccountPolicy
permissions.
To delete a transformer policy, you must have the logs:DeleteTransformer
and logs:DeleteAccountPolicy
permissions.
To delete a field index policy, you must have the logs:DeleteIndexPolicy
and logs:DeleteAccountPolicy
permissions.
If you delete a field index policy, the indexing of the log events that happened before you deleted the policy will still be used for up to 30 days to improve CloudWatch Logs Insights queries.
" }, "DeleteDataProtectionPolicy":{ "name":"DeleteDataProtectionPolicy", @@ -240,6 +240,23 @@ ], "documentation":"Deletes the specified destination, and eventually disables all the subscription filters that publish to it. This operation does not delete the physical resource encapsulated by the destination.
" }, + "DeleteIndexPolicy":{ + "name":"DeleteIndexPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteIndexPolicyRequest"}, + "output":{"shape":"DeleteIndexPolicyResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"Deletes a log-group level field index policy that was applied to a single log group. The indexing of the log events that happened before you delete the policy will still be used for as many as 30 days to improve CloudWatch Logs Insights queries.
You can't use this operation to delete an account-level index policy. Instead, use DeletAccountPolicy.
If you delete a log-group level field index policy and there is an account-level field index policy, in a few minutes the log group begins using that account-wide policy to index new incoming log events.
" + }, "DeleteLogAnomalyDetector":{ "name":"DeleteLogAnomalyDetector", "http":{ @@ -359,6 +376,22 @@ ], "documentation":"Deletes the specified subscription filter.
" }, + "DeleteTransformer":{ + "name":"DeleteTransformer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTransformerRequest"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"OperationAbortedException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"Deletes the log transformer for the specified log group. As soon as you do this, the transformation of incoming log events according to that transformer stops. If this account has an account-level transformer that applies to this log group, the log group begins using that account-level transformer when this log-group level transformer is deleted.
After you delete a transformer, be sure to edit any metric filters or subscription filters that relied on the transformed versions of the log events.
" + }, "DescribeAccountPolicies":{ "name":"DescribeAccountPolicies", "http":{ @@ -467,6 +500,40 @@ ], "documentation":"Lists the specified export tasks. You can list all your export tasks or filter the results based on task ID or task status.
" }, + "DescribeFieldIndexes":{ + "name":"DescribeFieldIndexes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFieldIndexesRequest"}, + "output":{"shape":"DescribeFieldIndexesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"Returns a list of field indexes listed in the field index policies of one or more log groups. For more information about field index policies, see PutIndexPolicy.
" + }, + "DescribeIndexPolicies":{ + "name":"DescribeIndexPolicies", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeIndexPoliciesRequest"}, + "output":{"shape":"DescribeIndexPoliciesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"Returns the field index policies of one or more log groups. For more information about field index policies, see PutIndexPolicy.
If a specified log group has a log-group level index policy, that policy is returned by this operation.
If a specified log group doesn't have a log-group level index policy, but an account-wide index policy applies to it, that account-wide policy is returned by this operation.
To find information about only account-level policies, use DescribeAccountPolicies instead.
" + }, "DescribeLogGroups":{ "name":"DescribeLogGroups", "http":{ @@ -759,6 +826,22 @@ ], "documentation":"Returns the results from the specified query.
Only the fields requested in the query are returned, along with a @ptr
field, which is the identifier for the log record. You can use the value of @ptr
in a GetLogRecord operation to get the full log record.
GetQueryResults
does not start running a query. To run a query, use StartQuery. For more information about how long results of previous queries are available, see CloudWatch Logs quotas.
If the value of the Status
field in the output is Running
, this operation returns only partial results. If you see a value of Scheduled
or Running
for the status, you can retry the operation later to see the final results.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start queries in linked source accounts. For more information, see CloudWatch cross-account observability.
" }, + "GetTransformer":{ + "name":"GetTransformer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTransformerRequest"}, + "output":{"shape":"GetTransformerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"Returns the information about the log transformer associated with this log group.
This operation returns data only for transformers created at the log group level. To get information for an account-level transformer, use DescribeAccountPolicies.
" + }, "ListAnomalies":{ "name":"ListAnomalies", "http":{ @@ -791,6 +874,22 @@ ], "documentation":"Retrieves a list of the log anomaly detectors in the account.
" }, + "ListLogGroupsForQuery":{ + "name":"ListLogGroupsForQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLogGroupsForQueryRequest"}, + "output":{"shape":"ListLogGroupsForQueryResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"Returns a list of the log groups that were analyzed during a single CloudWatch Logs Insights query. This can be useful for queries that use log group name prefixes or the filterIndex
command, because the log groups are dynamically selected in these cases.
For more information about field indexes, see Create field indexes to improve query performance and reduce costs.
" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -836,7 +935,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask
permission can use a GetLogEvents or FilterLogEvents operation with the unmask
parameter set to true
to view the unmasked log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy
operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
An Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName
. To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
Creates an account-level data protection policy, subscription filter policy, or field index policy that applies to all log groups or a subset of log groups in the account.
Data protection policy
A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
If you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask
permission can use a GetLogEvents or FilterLogEvents operation with the unmask
parameter set to true
to view the unmasked log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
To use the PutAccountPolicy
operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy
and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
Subscription filter policy
A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
A Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
Transformer policy
Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.
You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.
A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.
Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.
You can create transformers only for the log groups in the Standard log class.
You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another transformer policy filtered to my-logpprod or my-logging.
You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.
Field index policy
You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request IDs, session IDs, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs.
To find the fields that are in your log group events, use the GetLogGroupFields operation.
For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value.
Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId.
You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging.
If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.
If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.
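As an illustration of the account-level flow described above, the following is a minimal boto3 sketch that creates an account-level field index policy. The policy name is a placeholder, and the policy document follows the example JSON shown later in this model.

    import json
    import boto3

    logs = boto3.client("logs")  # assumes credentials and Region are configured

    # Create an account-level field index policy that indexes two fields.
    logs.put_account_policy(
        policyName="account-field-indexes",  # placeholder name
        policyType="FIELD_INDEX_POLICY",
        policyDocument=json.dumps({"Fields": ["RequestId", "TransactionId"]}),
    )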
Creates or updates an access policy associated with an existing destination. An access policy is an IAM policy document that is used to authorize claims to register a subscription filter against a given destination.
" }, + "PutIndexPolicy":{ + "name":"PutIndexPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutIndexPolicyRequest"}, + "output":{"shape":"PutIndexPolicyResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"Creates or updates a field index policy for the specified log group. Only log groups in the Standard log class support field index policies. For more information about log classes, see Log classes.
You can use field index policies to create field indexes on fields found in log events in the log group. Creating field indexes speeds up and lowers the costs for CloudWatch Logs Insights queries that reference those field indexes, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request IDs, session IDs, user IDs, and instance IDs. For more information, see Create field indexes to improve query performance and reduce costs.
To find the fields that are in your log group events, use the GetLogGroupFields operation.
For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId IN [value, value, ...] will process fewer log events to reduce costs, and have improved performance.
Each index policy has the following quotas and restrictions:
As many as 20 fields can be included in the policy.
Each field name can include as many as 100 characters.
Matches of log events to the names of indexed fields are case-sensitive. For example, a field index of RequestId won't match a log event containing requestId.
Log group-level field index policies created with PutIndexPolicy override account-level field index policies created with PutAccountPolicy. If you use PutIndexPolicy to create a field index policy for a log group, that log group uses only that policy. The log group ignores any account-wide field index policy that you might have created.
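A minimal boto3 sketch of the log group-level variant, assuming a log group named my-log-group; as noted above, this policy makes the log group ignore any account-level index policy.

    import json
    import boto3

    logs = boto3.client("logs")

    # Index requestId for one log group only; overrides the account-level policy.
    logs.put_index_policy(
        logGroupIdentifier="my-log-group",  # name or ARN; placeholder
        policyDocument=json.dumps({"Fields": ["requestId"]}),
    )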
Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress or requestID as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
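A short boto3 sketch of a metric filter that counts matching events. The log group and namespace are placeholders, and no dimensions are set, which avoids the high-cardinality charges warned about above.

    import boto3

    logs = boto3.client("logs")

    # Count occurrences of the term ERROR as a custom metric (value 1 per match).
    logs.put_metric_filter(
        logGroupName="my-app-logs",      # placeholder
        filterName="error-count",
        filterPattern="ERROR",
        metricTransformations=[{
            "metricName": "ErrorCount",
            "metricNamespace": "MyApp",  # placeholder namespace
            "metricValue": "1",
            "unit": "Count",
        }],
    )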
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.
Using regular expressions to create subscription filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
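A boto3 sketch of a same-account Kinesis destination; both ARNs are placeholders, and roleArn is required here because the destination is not a Lambda function.

    import boto3

    logs = boto3.client("logs")

    # Stream ERROR events to a Kinesis data stream in the same account.
    logs.put_subscription_filter(
        logGroupName="my-app-logs",  # placeholder
        filterName="errors-to-kinesis",
        filterPattern="ERROR",
        destinationArn="arn:aws:kinesis:us-east-1:123456789012:stream/my-stream",
        roleArn="arn:aws:iam::123456789012:role/CWLtoKinesisRole",
    )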
Creates or updates a log transformer for a single log group. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information.
After you have created a transformer, CloudWatch Logs performs the transformations at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.
You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.
A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. The processors work one after another, in the order that you list them, like a pipeline. For more information about the available processors to use in a transformer, see Processors that you can use.
Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.
You can create transformers only for the log groups in the Standard log class.
You can also set up a transformer at the account level. For more information, see PutAccountPolicy. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.
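A minimal boto3 sketch, using only processors whose shapes appear in this model (parseJSON and addKeys); the log group and key values are illustrative.

    import boto3

    logs = boto3.client("logs")

    # Two-processor transformer: parse the message as JSON, then add a static key.
    logs.put_transformer(
        logGroupIdentifier="my-app-logs",  # placeholder
        transformerConfig=[
            {"parseJSON": {}},  # a parse-type processor must come first
            {"addKeys": {"entries": [
                {"key": "env", "value": "prod", "overwriteIfExists": False},
            ]}},
        ],
    )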
Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.
For more information, see CloudWatch Logs Insights Query Syntax.
After you run a query using StartQuery, the query results are stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, using the queryId that StartQuery returns.
If you have associated a KMS key with the query results in this account, then StartQuery uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.
Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account.
You can have up to 30 concurrent CloudWatch Logs Insights queries, including queries that have been added to dashboards.
" + "documentation":"Starts a query of one or more log groups using CloudWatch Logs Insights. You specify the log groups and time range to query and the query string to use.
For more information, see CloudWatch Logs Insights Query Syntax.
After you run a query using StartQuery, the query results are stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, using the queryId that StartQuery returns.
To specify the log groups to query, a StartQuery operation must include one of the following:
Either exactly one of the following parameters: logGroupName, logGroupNames, or logGroupIdentifiers
Or the queryString must include a SOURCE command to select log groups for the query. The SOURCE command can select log groups based on log group name prefix, account ID, and log class.
For more information about the SOURCE command, see SOURCE.
If you have associated a KMS key with the query results in this account, then StartQuery uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.
Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account.
You can have up to 30 concurrent CloudWatch Logs Insights queries, including queries that have been added to dashboards.
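A boto3 sketch of the start-then-poll pattern described above; the log group name and query are placeholders, and the loop exits on the terminal status values documented for GetQueryResults.

    import time
    import boto3

    logs = boto3.client("logs")

    # Query the last hour of one log group, then poll until the query finishes.
    start = logs.start_query(
        logGroupNames=["my-app-logs"],  # placeholder
        startTime=int(time.time()) - 3600,
        endTime=int(time.time()),
        queryString="fields @timestamp, @message | limit 20",
    )
    while True:
        result = logs.get_query_results(queryId=start["queryId"])
        if result["status"] in ("Complete", "Failed", "Cancelled", "Timeout"):
            break
        time.sleep(1)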
" }, "StopQuery":{ "name":"StopQuery", @@ -1126,6 +1261,21 @@ ], "documentation":"Tests the filter pattern of a metric filter against a sample of log event messages. You can use this operation to validate the correctness of a metric filter pattern.
" }, + "TestTransformer":{ + "name":"TestTransformer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestTransformerRequest"}, + "output":{"shape":"TestTransformerResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"Use this operation to test a log transformer. You enter the transformer configuration and a set of log events to test with. The operation responds with an array that includes the original log events and the transformed versions.
" + }, "UntagLogGroup":{ "name":"UntagLogGroup", "http":{ @@ -1256,7 +1406,7 @@ }, "selectionCriteria":{ "shape":"SelectionCriteria", - "documentation":"The log group selection criteria for this subscription filter policy.
" + "documentation":"The log group selection criteria that is used for this policy.
" }, "accountId":{ "shape":"AccountId", @@ -1266,6 +1416,50 @@ "documentation":"A structure that contains information about one CloudWatch Logs account policy.
" }, "AccountPolicyDocument":{"type":"string"}, + "AddKeyEntries":{ + "type":"list", + "member":{"shape":"AddKeyEntry"}, + "max":5, + "min":1 + }, + "AddKeyEntry":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"Key", + "documentation":"The key of the new entry to be added to the log event
" + }, + "value":{ + "shape":"AddKeyValue", + "documentation":"The value of the new entry to be added to the log event
" + }, + "overwriteIfExists":{ + "shape":"OverwriteIfExists", + "documentation":"Specifies whether to overwrite the value if the key already exists in the log event. If you omit this, the default is false
.
This object defines one key that will be added with the addKeys processor.
" + }, + "AddKeyValue":{ + "type":"string", + "max":256, + "min":1 + }, + "AddKeys":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"AddKeyEntries", + "documentation":"An array of objects, where each object contains the information about one key to add to the log event.
" + } + }, + "documentation":"This processor adds new key-value pairs to the log event.
For more information about this processor including examples, see addKeys in the CloudWatch Logs User Guide.
" + }, "AllowedActionForAllowVendedLogsDeliveryForResource":{"type":"string"}, "AllowedFieldDelimiters":{ "type":"list", @@ -1455,6 +1649,7 @@ "max":90, "min":7 }, + "ApplyOnTransformedLogs":{"type":"boolean"}, "Arn":{"type":"string"}, "AssociateKmsKeyRequest":{ "type":"structure", @@ -1476,6 +1671,28 @@ }, "Baseline":{"type":"boolean"}, "Boolean":{"type":"boolean"}, + "CSV":{ + "type":"structure", + "members":{ + "quoteCharacter":{ + "shape":"QuoteCharacter", + "documentation":"The character used used as a text qualifier for a single column of data. If you omit this, the double quotation mark \"
character is used.
The character used to separate each column in the original comma-separated value log event. If you omit this, the processor looks for the comma ,
character as the delimiter.
An array of names to use for the columns in the transformed log event.
If you omit this, default column names ([column_1, column_2 ...]
) are used.
The path to the field in the log event that has the comma separated values to be parsed. If you omit this value, the whole log message is processed.
" + } + }, + "documentation":"The CSV
processor parses comma-separated values (CSV) from the log events into columns.
For more information about this processor including examples, see csv in the CloudWatch Logs User Guide.
" + }, "CancelExportTaskRequest":{ "type":"structure", "required":["taskId"], @@ -1492,6 +1709,16 @@ "min":36, "pattern":"\\S{36,128}" }, + "Column":{ + "type":"string", + "max":128, + "min":1 + }, + "Columns":{ + "type":"list", + "member":{"shape":"Column"}, + "max":100 + }, "ConfigurationTemplate":{ "type":"structure", "members":{ @@ -1567,6 +1794,45 @@ "documentation":"This operation attempted to create a resource that already exists.
", "exception":true }, + "CopyValue":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"CopyValueEntries", + "documentation":"An array of CopyValueEntry
objects, where each object contains the information about one field value to copy.
This processor copies values within a log event. You can also use this processor to add metadata to log events by copying the values of the following metadata keys into the log events: @logGroupName
, @logGroupStream
, @accountId
, @regionName
.
For more information about this processor including examples, see copyValue in the CloudWatch Logs User Guide.
" + }, + "CopyValueEntries":{ + "type":"list", + "member":{"shape":"CopyValueEntry"}, + "max":5, + "min":1 + }, + "CopyValueEntry":{ + "type":"structure", + "required":[ + "source", + "target" + ], + "members":{ + "source":{ + "shape":"Source", + "documentation":"The key to copy.
" + }, + "target":{ + "shape":"Target", + "documentation":"The key of the field to copy the value to.
" + }, + "overwriteIfExists":{ + "shape":"OverwriteIfExists", + "documentation":"Specifies whether to overwrite the value if the destination key already exists. If you omit this, the default is false
.
This object defines one value to be copied with the copyValue processor.
" + }, "Count":{"type":"long"}, "CreateDeliveryRequest":{ "type":"structure", @@ -1758,6 +2024,45 @@ "DISABLED" ] }, + "DateTimeConverter":{ + "type":"structure", + "required":[ + "source", + "target", + "matchPatterns" + ], + "members":{ + "source":{ + "shape":"Source", + "documentation":"The key to apply the date conversion to.
" + }, + "target":{ + "shape":"Target", + "documentation":"The JSON field to store the result in.
" + }, + "targetFormat":{ + "shape":"TargetFormat", + "documentation":"The datetime format to use for the converted data in the target field.
If you omit this, the default of yyyy-MM-dd'T'HH:mm:ss.SSS'Z
is used.
A list of patterns to match against the source
field.
The time zone of the source field. If you omit this, the default used is the UTC zone.
" + }, + "targetTimezone":{ + "shape":"TargetTimezone", + "documentation":"The time zone of the target field. If you omit this, the default used is the UTC zone.
" + }, + "locale":{ + "shape":"Locale", + "documentation":"The locale of the source field. If you omit this, the default of locale.ROOT
is used.
This processor converts a datetime string into a format that you specify.
For more information about this processor including examples, see datetimeConverter in the CloudWatch Logs User Guide.
" + }, "Days":{ "type":"integer", "documentation":"The number of days to retain the log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, and 3653.
To set a log group so that its log events do not expire, use DeleteRetentionPolicy.
" @@ -1840,6 +2145,32 @@ } } }, + "DeleteIndexPolicyRequest":{ + "type":"structure", + "required":["logGroupIdentifier"], + "members":{ + "logGroupIdentifier":{ + "shape":"LogGroupIdentifier", + "documentation":"The log group to delete the index policy for. You can specify either the name or the ARN of the log group.
" + } + } + }, + "DeleteIndexPolicyResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteKeys":{ + "type":"structure", + "required":["withKeys"], + "members":{ + "withKeys":{ + "shape":"DeleteWithKeys", + "documentation":"The list of keys to delete.
" + } + }, + "documentation":"This processor deletes entries from a log event. These entries are key-value pairs.
For more information about this processor including examples, see deleteKeys in the CloudWatch Logs User Guide.
" + }, "DeleteLogAnomalyDetectorRequest":{ "type":"structure", "required":["anomalyDetectorArn"], @@ -1949,6 +2280,27 @@ } } }, + "DeleteTransformerRequest":{ + "type":"structure", + "required":["logGroupIdentifier"], + "members":{ + "logGroupIdentifier":{ + "shape":"LogGroupIdentifier", + "documentation":"Specify either the name or ARN of the log group to delete the transformer for. If the log group is in a source account and you are using a monitoring account, you must use the log group ARN.
" + } + } + }, + "DeleteWithKeys":{ + "type":"list", + "member":{"shape":"WithKey"}, + "max":5, + "min":1 + }, + "Delimiter":{ + "type":"string", + "max":1, + "min":1 + }, "Deliveries":{ "type":"list", "member":{"shape":"Delivery"} @@ -2132,6 +2484,10 @@ "accountIdentifiers":{ "shape":"AccountIds", "documentation":"If you are using an account that is set up as a monitoring account for CloudWatch unified cross-account observability, you can use this to specify the account ID of a source account. If you do, the operation returns the account policy for the specified account. Currently, you can specify only one account ID in this parameter.
If you omit this parameter, only the policy in the current account is returned.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The token for the next set of items to return. (You received this token from a previous call.)
" } } }, @@ -2141,6 +2497,10 @@ "accountPolicies":{ "shape":"AccountPolicies", "documentation":"An array of structures that contain information about the CloudWatch Logs account policies that match the specified filters.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The token to use when requesting the next set of items. The token expires after 24 hours.
" } } }, @@ -2298,25 +2658,79 @@ "nextToken":{"shape":"NextToken"} } }, - "DescribeLimit":{ - "type":"integer", - "max":50, + "DescribeFieldIndexesLogGroupIdentifiers":{ + "type":"list", + "member":{"shape":"LogGroupIdentifier"}, + "max":100, "min":1 }, - "DescribeLogGroupsRequest":{ + "DescribeFieldIndexesRequest":{ "type":"structure", + "required":["logGroupIdentifiers"], "members":{ - "accountIdentifiers":{ - "shape":"AccountIds", - "documentation":"When includeLinkedAccounts
is set to True
, use this parameter to specify the list of accounts to search. You can specify as many as 20 account IDs in the array.
An array containing the names or ARNs of the log groups that you want to retrieve field indexes for.
" }, - "logGroupNamePrefix":{ - "shape":"LogGroupName", - "documentation":"The prefix to match.
logGroupNamePrefix
and logGroupNamePattern
are mutually exclusive. Only one of these parameters can be passed.
An array containing the field index information.
" }, - "logGroupNamePattern":{ - "shape":"LogGroupNamePattern", - "documentation":"If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo
, log groups named FooBar
, aws/Foo
, and GroupFoo
would match, but foo
, F/o/o
and Froo
would not match.
If you specify logGroupNamePattern
in your request, then only arn
, creationTime
, and logGroupName
are included in the response.
logGroupNamePattern
and logGroupNamePrefix
are mutually exclusive. Only one of these parameters can be passed.
An array containing the name or ARN of the log group that you want to retrieve field index policies for.
" + }, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeIndexPoliciesResponse":{ + "type":"structure", + "members":{ + "indexPolicies":{ + "shape":"IndexPolicies", + "documentation":"An array containing the field index policies.
" + }, + "nextToken":{"shape":"NextToken"} + } + }, + "DescribeLimit":{ + "type":"integer", + "max":50, + "min":1 + }, + "DescribeLogGroupsRequest":{ + "type":"structure", + "members":{ + "accountIdentifiers":{ + "shape":"AccountIds", + "documentation":"When includeLinkedAccounts
is set to True
, use this parameter to specify the list of accounts to search. You can specify as many as 20 account IDs in the array.
The prefix to match.
logGroupNamePrefix
and logGroupNamePattern
are mutually exclusive. Only one of these parameters can be passed.
If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo
, log groups named FooBar
, aws/Foo
, and GroupFoo
would match, but foo
, F/o/o
and Froo
would not match.
If you specify logGroupNamePattern
in your request, then only arn
, creationTime
, and logGroupName
are included in the response.
logGroupNamePattern
and logGroupNamePrefix
are mutually exclusive. Only one of these parameters can be passed.
Reserved for internal use.
" + "documentation":"The attributes of the entity which identify the specific entity, as a list of key-value pairs. Entities with the same keyAttributes
are considered to be the same entity.
There are five allowed attributes (key names): Type
, ResourceType
, Identifier
Name
, and Environment
.
For details about how to use the key attributes, see How to add related information to telemetry in the CloudWatch User Guide.
" }, "attributes":{ "shape":"EntityAttributes", - "documentation":"Reserved for internal use.
" + "documentation":"Additional attributes of the entity that are not used to specify the identity of the entity. A list of key-value pairs.
For details about how to use the attributes, see How to add related information to telemetry in the CloudWatch User Guide.
" } }, - "documentation":"Reserved for internal use.
" + "documentation":"The entity associated with the log events in a PutLogEvents
call.
If this field index appears in an index policy that applies only to a single log group, the ARN of that log group is displayed here.
" + }, + "fieldIndexName":{ + "shape":"FieldIndexName", + "documentation":"The string that this field index matches.
" + }, + "lastScanTime":{ + "shape":"Timestamp", + "documentation":"The most recent time that CloudWatch Logs scanned ingested log events to search for this field index to improve the speed of future CloudWatch Logs Insights queries that search for this field index.
" + }, + "firstEventTime":{ + "shape":"Timestamp", + "documentation":"The time and date of the earliest log event that matches this field index, after the index policy that contains it was created.
" + }, + "lastEventTime":{ + "shape":"Timestamp", + "documentation":"The time and date of the most recent log event that matches this field index.
" + } + }, + "documentation":"This structure describes one log event field that is used as an index in at least one index policy in this account.
" + }, + "FieldIndexName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "FieldIndexes":{ + "type":"list", + "member":{"shape":"FieldIndex"} + }, "FilterCount":{"type":"integer"}, "FilterLogEventsRequest":{ "type":"structure", @@ -2948,7 +3403,20 @@ "type":"list", "member":{"shape":"FilteredLogEvent"} }, + "Flatten":{"type":"boolean"}, + "FlattenedElement":{ + "type":"string", + "enum":[ + "first", + "last" + ] + }, "ForceUpdate":{"type":"boolean"}, + "FromKey":{ + "type":"string", + "max":128, + "min":1 + }, "GetDataProtectionPolicyRequest":{ "type":"structure", "required":["logGroupIdentifier"], @@ -3239,12 +3707,100 @@ } } }, + "GetTransformerRequest":{ + "type":"structure", + "required":["logGroupIdentifier"], + "members":{ + "logGroupIdentifier":{ + "shape":"LogGroupIdentifier", + "documentation":"Specify either the name or ARN of the log group to return transformer information for. If the log group is in a source account and you are using a monitoring account, you must use the log group ARN.
" + } + } + }, + "GetTransformerResponse":{ + "type":"structure", + "members":{ + "logGroupIdentifier":{ + "shape":"LogGroupIdentifier", + "documentation":"The ARN of the log group that you specified in your request.
" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"The creation time of the transformer, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"The date and time when this transformer was most recently modified, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + }, + "transformerConfig":{ + "shape":"Processors", + "documentation":"This sructure contains the configuration of the requested transformer.
" + } + } + }, + "Grok":{ + "type":"structure", + "required":["match"], + "members":{ + "source":{ + "shape":"Source", + "documentation":"The path to the field in the log event that you want to parse. If you omit this value, the whole log message is parsed.
" + }, + "match":{ + "shape":"GrokMatch", + "documentation":"The grok pattern to match against the log event. For a list of supported grok patterns, see Supported grok patterns.
" + } + }, + "documentation":"This processor uses pattern matching to parse and structure unstructured data. This processor can also extract fields from log messages.
For more information about this processor including examples, see grok in the CloudWatch Logs User Guide.
" + }, + "GrokMatch":{ + "type":"string", + "max":128, + "min":1 + }, "Histogram":{ "type":"map", "key":{"shape":"Time"}, "value":{"shape":"Count"} }, "IncludeLinkedAccounts":{"type":"boolean"}, + "IndexPolicies":{ + "type":"list", + "member":{"shape":"IndexPolicy"} + }, + "IndexPolicy":{ + "type":"structure", + "members":{ + "logGroupIdentifier":{ + "shape":"LogGroupIdentifier", + "documentation":"The ARN of the log group that this index policy applies to.
" + }, + "lastUpdateTime":{ + "shape":"Timestamp", + "documentation":"The date and time that this index policy was most recently updated.
" + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"The policy document for this index policy, in JSON format.
" + }, + "policyName":{ + "shape":"PolicyName", + "documentation":"The name of this policy. Responses about log group-level field index policies don't have this field, because those policies don't have names.
" + }, + "source":{ + "shape":"IndexSource", + "documentation":"This field indicates whether this is an account-level index policy or an index policy that applies only to a single log group.
" + } + }, + "documentation":"This structure contains information about one field index policy in this account.
" + }, + "IndexSource":{ + "type":"string", + "enum":[ + "ACCOUNT", + "LOG_GROUP" + ] + }, "InferredTokenName":{ "type":"string", "min":1 @@ -3312,6 +3868,21 @@ "exception":true }, "IsSampled":{"type":"boolean"}, + "Key":{ + "type":"string", + "max":128, + "min":1 + }, + "KeyPrefix":{ + "type":"string", + "max":128, + "min":1 + }, + "KeyValueDelimiter":{ + "type":"string", + "max":128, + "min":1 + }, "KmsKeyId":{ "type":"string", "max":256 @@ -3385,6 +3956,36 @@ "nextToken":{"shape":"NextToken"} } }, + "ListLogGroupsForQueryMaxResults":{ + "type":"integer", + "max":500, + "min":50 + }, + "ListLogGroupsForQueryRequest":{ + "type":"structure", + "required":["queryId"], + "members":{ + "queryId":{ + "shape":"QueryId", + "documentation":"The ID of the query to use. This query ID is from the response to your StartQuery operation.
" + }, + "nextToken":{"shape":"NextToken"}, + "maxResults":{ + "shape":"ListLogGroupsForQueryMaxResults", + "documentation":"Limits the number of returned log groups to the specified number.
" + } + } + }, + "ListLogGroupsForQueryResponse":{ + "type":"structure", + "members":{ + "logGroupIdentifiers":{ + "shape":"LogGroupIdentifiers", + "documentation":"An array of the names and ARNs of the log groups that were processed in the query.
" + }, + "nextToken":{"shape":"NextToken"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -3427,6 +4028,40 @@ "deprecated":true, "deprecatedMessage":"Please use the generic tagging API model ListTagsForResourceRequest and ListTagsForResourceResponse" }, + "ListToMap":{ + "type":"structure", + "required":[ + "source", + "key" + ], + "members":{ + "source":{ + "shape":"Source", + "documentation":"The key in the log event that has a list of objects that will be converted to a map.
" + }, + "key":{ + "shape":"Key", + "documentation":"The key of the field to be extracted as keys in the generated map
" + }, + "valueKey":{ + "shape":"ValueKey", + "documentation":"If this is specified, the values that you specify in this parameter will be extracted from the source
objects and put into the values of the generated map. Otherwise, original objects in the source list will be put into the values of the generated map.
The key of the field that will hold the generated map
" + }, + "flatten":{ + "shape":"Flatten", + "documentation":"A Boolean value to indicate whether the list will be flattened into single items. Specify true
to flatten the list. The default is false
If you set flatten
to true
, use flattenedElement
to specify which element, first
or last
, to keep.
You must specify this parameter if flatten
is true
This processor takes a list of objects that contain key fields, and converts them into a map of target keys.
For more information about this processor including examples, see listToMap in the CloudWatch Logs User Guide.
" + }, "LiveTailSessionLogEvent":{ "type":"structure", "members":{ @@ -3513,6 +4148,10 @@ "documentation":"This object contains the log events and metadata for a Live Tail session.
", "event":true }, + "Locale":{ + "type":"string", + "min":1 + }, "LogEvent":{ "type":"structure", "members":{ @@ -3713,6 +4352,23 @@ "max":10, "min":1 }, + "LowerCaseString":{ + "type":"structure", + "required":["withKeys"], + "members":{ + "withKeys":{ + "shape":"LowerCaseStringWithKeys", + "documentation":"The array caontaining the keys of the fields to convert to lowercase.
" + } + }, + "documentation":"This processor converts a string to lowercase.
For more information about this processor including examples, see lowerCaseString in the CloudWatch Logs User Guide.
" + }, + "LowerCaseStringWithKeys":{ + "type":"list", + "member":{"shape":"WithKey"}, + "max":10, + "min":1 + }, "MalformedQueryException":{ "type":"structure", "members":{ @@ -3721,6 +4377,16 @@ "documentation":"The query string is not valid. Details about this error are displayed in a QueryCompileError
object. For more information, see QueryCompileError.
For more information about valid query syntax, see CloudWatch Logs Insights Query Syntax.
", "exception":true }, + "MatchPattern":{ + "type":"string", + "min":1 + }, + "MatchPatterns":{ + "type":"list", + "member":{"shape":"MatchPattern"}, + "max":5, + "min":1 + }, "Message":{"type":"string"}, "MetricFilter":{ "type":"structure", @@ -3741,6 +4407,10 @@ "logGroupName":{ "shape":"LogGroupName", "documentation":"The name of the log group.
" + }, + "applyOnTransformedLogs":{ + "shape":"ApplyOnTransformedLogs", + "documentation":"This parameter is valid only for log groups that have an active log transformer. For more information about log transformers, see PutTransformer.
If this value is true
, the metric filter is applied on the transformed version of the log events instead of the original ingested log events.
Metric filters express how CloudWatch Logs would extract metric observations from ingested log events and transform them into metric data in a CloudWatch metric.
" @@ -3828,11 +4498,55 @@ "documentation":"The value to publish to the CloudWatch metric. For example, if you're counting the occurrences of a term like Error
, the value is 1
for each occurrence. If you're counting the bytes transferred, the value is the value in the log event.
The key to move.
" + }, + "target":{ + "shape":"Target", + "documentation":"The key to move to.
" + }, + "overwriteIfExists":{ + "shape":"OverwriteIfExists", + "documentation":"Specifies whether to overwrite the value if the destination key already exists. If you omit this, the default is false
.
This object defines one key that will be moved with the moveKey processor.
" + }, + "MoveKeys":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"MoveKeyEntries", + "documentation":"An array of objects, where each object contains the information about one key to move.
" + } + }, + "documentation":"This processor moves a key from one field to another. The original key is deleted.
For more information about this processor including examples, see moveKeys in the CloudWatch Logs User Guide.
" + }, "NextToken":{ "type":"string", "documentation":"The token for the next set of items to return. The token expires after 24 hours.
", "min":1 }, + "NonMatchValue":{ + "type":"string", + "max":128, + "min":1 + }, "OperationAbortedException":{ "type":"structure", "members":{ @@ -3883,6 +4597,110 @@ "type":"list", "member":{"shape":"OutputLogEvent"} }, + "OverwriteIfExists":{"type":"boolean"}, + "ParseCloudfront":{ + "type":"structure", + "members":{ + "source":{ + "shape":"Source", + "documentation":"Omit this parameter and the whole log message will be processed by this processor. No other value than @message
is allowed for source
.
This processor parses CloudFront vended logs, extract fields, and convert them into JSON format. Encoded field values are decoded. Values that are integers and doubles are treated as such. For more information about this processor including examples, see parseCloudfront
For more information about CloudFront log format, see Configure and use standard logs (access logs).
If you use this processor, it must be the first processor in your transformer.
" + }, + "ParseJSON":{ + "type":"structure", + "members":{ + "source":{ + "shape":"Source", + "documentation":"Path to the field in the log event that will be parsed. Use dot notation to access child fields. For example, store.book
The location to put the parsed key value pair into. If you omit this parameter, it is placed under the root node.
" + } + }, + "documentation":"This processor parses log events that are in JSON format. It can extract JSON key-value pairs and place them under a destination that you specify.
Additionally, because you must have at least one parse-type processor in a transformer, you can use ParseJSON
as that processor for JSON-format logs, so that you can also apply other processors, such as mutate processors, to these logs.
For more information about this processor including examples, see parseJSON in the CloudWatch Logs User Guide.
" + }, + "ParseKeyValue":{ + "type":"structure", + "members":{ + "source":{ + "shape":"Source", + "documentation":"Path to the field in the log event that will be parsed. Use dot notation to access child fields. For example, store.book
The destination field to put the extracted key-value pairs into
" + }, + "fieldDelimiter":{ + "shape":"ParserFieldDelimiter", + "documentation":"The field delimiter string that is used between key-value pairs in the original log events. If you omit this, the ampersand &
character is used.
The delimiter string to use between the key and value in each pair in the transformed log event.
If you omit this, the equal =
character is used.
If you want to add a prefix to all transformed keys, specify it here.
" + }, + "nonMatchValue":{ + "shape":"NonMatchValue", + "documentation":"A value to insert into the value field in the result, when a key-value pair is not successfully split.
" + }, + "overwriteIfExists":{ + "shape":"OverwriteIfExists", + "documentation":"Specifies whether to overwrite the value if the destination key already exists. If you omit this, the default is false
.
This processor parses a specified field in the original log event into key-value pairs.
For more information about this processor including examples, see parseKeyValue in the CloudWatch Logs User Guide.
" + }, + "ParsePostgres":{ + "type":"structure", + "members":{ + "source":{ + "shape":"Source", + "documentation":"Omit this parameter and the whole log message will be processed by this processor. No other value than @message
is allowed for source
.
Use this processor to parse RDS for PostgreSQL vended logs, extract fields, and and convert them into a JSON format. This processor always processes the entire log event message. For more information about this processor including examples, see parsePostGres.
For more information about RDS for PostgreSQL log format, see RDS for PostgreSQL database log filesTCP flag sequence.
If you use this processor, it must be the first processor in your transformer.
Omit this parameter and the whole log message will be processed by this processor. No other value than @message
is allowed for source
.
Use this processor to parse Route 53 vended logs, extract fields, and and convert them into a JSON format. This processor always processes the entire log event message. For more information about this processor including examples, see parseRoute53.
If you use this processor, it must be the first processor in your transformer.
Omit this parameter and the whole log message will be processed by this processor. No other value than @message
is allowed for source
.
Use this processor to parse Amazon VPC vended logs, extract fields, and and convert them into a JSON format. This processor always processes the entire log event message.
This processor doesn't support custom log formats, such as NAT gateway logs. For more information about custom log formats in Amazon VPC, see parseVPC For more information about this processor including examples, see parseVPC.
If you use this processor, it must be the first processor in your transformer.
Omit this parameter and the whole log message will be processed by this processor. No other value than @message
is allowed for source
.
Use this processor to parse WAF vended logs, extract fields, and and convert them into a JSON format. This processor always processes the entire log event message. For more information about this processor including examples, see parseWAF.
For more information about WAF log format, see Log examples for web ACL traffic.
If you use this processor, it must be the first processor in your transformer.
Use this parameter to include the addKeys processor in your transformer.
" + }, + "copyValue":{ + "shape":"CopyValue", + "documentation":"Use this parameter to include the copyValue processor in your transformer.
" + }, + "csv":{ + "shape":"CSV", + "documentation":"Use this parameter to include the CSV processor in your transformer.
" + }, + "dateTimeConverter":{ + "shape":"DateTimeConverter", + "documentation":"Use this parameter to include the datetimeConverter processor in your transformer.
" + }, + "deleteKeys":{ + "shape":"DeleteKeys", + "documentation":"Use this parameter to include the deleteKeys processor in your transformer.
" + }, + "grok":{ + "shape":"Grok", + "documentation":"Use this parameter to include the grok processor in your transformer.
" + }, + "listToMap":{ + "shape":"ListToMap", + "documentation":"Use this parameter to include the listToMap processor in your transformer.
" + }, + "lowerCaseString":{ + "shape":"LowerCaseString", + "documentation":"Use this parameter to include the lowerCaseString processor in your transformer.
" + }, + "moveKeys":{ + "shape":"MoveKeys", + "documentation":"Use this parameter to include the moveKeys processor in your transformer.
" + }, + "parseCloudfront":{ + "shape":"ParseCloudfront", + "documentation":"Use this parameter to include the parseCloudfront processor in your transformer.
If you use this processor, it must be the first processor in your transformer.
" + }, + "parseJSON":{ + "shape":"ParseJSON", + "documentation":"Use this parameter to include the parseJSON processor in your transformer.
" + }, + "parseKeyValue":{ + "shape":"ParseKeyValue", + "documentation":"Use this parameter to include the parseKeyValue processor in your transformer.
" + }, + "parseRoute53":{ + "shape":"ParseRoute53", + "documentation":"Use this parameter to include the parseRoute53 processor in your transformer.
If you use this processor, it must be the first processor in your transformer.
" + }, + "parsePostgres":{ + "shape":"ParsePostgres", + "documentation":"Use this parameter to include the parsePostGres processor in your transformer.
If you use this processor, it must be the first processor in your transformer.
" + }, + "parseVPC":{ + "shape":"ParseVPC", + "documentation":"Use this parameter to include the parseVPC processor in your transformer.
If you use this processor, it must be the first processor in your transformer.
" + }, + "parseWAF":{ + "shape":"ParseWAF", + "documentation":"Use this parameter to include the parseWAF processor in your transformer.
If you use this processor, it must be the first processor in your transformer.
" + }, + "renameKeys":{ + "shape":"RenameKeys", + "documentation":"Use this parameter to include the renameKeys processor in your transformer.
" + }, + "splitString":{ + "shape":"SplitString", + "documentation":"Use this parameter to include the splitString processor in your transformer.
" + }, + "substituteString":{ + "shape":"SubstituteString", + "documentation":"Use this parameter to include the substituteString processor in your transformer.
" + }, + "trimString":{ + "shape":"TrimString", + "documentation":"Use this parameter to include the trimString processor in your transformer.
" + }, + "typeConverter":{ + "shape":"TypeConverter", + "documentation":"Use this parameter to include the typeConverter processor in your transformer.
" + }, + "upperCaseString":{ + "shape":"UpperCaseString", + "documentation":"Use this parameter to include the upperCaseString processor in your transformer.
" + } + }, + "documentation":"This structure contains the information about one processor in a log transformer.
" + }, + "Processors":{ + "type":"list", + "member":{"shape":"Processor"}, + "max":20, + "min":1 + }, "PutAccountPolicyRequest":{ "type":"structure", "required":[ @@ -3972,7 +4892,7 @@ }, "policyDocument":{ "shape":"AccountPolicyDocument", - "documentation":"Specify the policy, in JSON.
Data protection policy
A data protection policy must include two JSON blocks:
The first block must include both a DataIdentifer
array and an Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.
The Operation
property with an Audit
action is required to find the sensitive data terms. This Audit
action must contain a FindingsDestination
object. You can optionally use that FindingsDestination
object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an Operation
property with an Deidentify
action. The DataIdentifer
array must exactly match the DataIdentifer
array in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the data, and it must contain the \"MaskConfig\": {}
object. The \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
The contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
, Description
, and Version
fields. The Name
is different than the operation's policyName
parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters long.
Subscription filter policy
A subscription filter policy can include the following attributes in a JSON block:
DestinationArn The ARN of the destination to deliver log events to. Supported destinations are:
A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
A Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.
FilterPattern A filter pattern for subscribing to a filtered stream of log events.
Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random
for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.
Specify the policy, in JSON.
Data protection policy
A data protection policy must include two JSON blocks:
The first block must include both a DataIdentifier array and an Operation property with an Audit action. The DataIdentifier array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.
The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifier array and an Operation property with a Deidentify action. The DataIdentifier array must exactly match the DataIdentifier array in the first block of the policy.
The Operation property with the Deidentify action is what actually masks the data, and it must contain the \"MaskConfig\": {} object. The \"MaskConfig\": {} object must be empty.
For an example data protection policy, see the Examples section on this page.
The contents of the two DataIdentifier arrays must match exactly.
In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different from the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument can be up to 30,720 characters long.
Subscription filter policy
A subscription filter policy can include the following attributes in a JSON block:
DestinationArn The ARN of the destination to deliver log events to. Supported destinations are:
A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
A Firehose data stream in the same account as the subscription policy, for same-account delivery.
A Lambda function in the same account as the subscription policy, for same-account delivery.
A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.
FilterPattern A filter pattern for subscribing to a filtered stream of log events.
Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.
Transformer policy
A transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see Processors that you can use.
Field index policy
A field index policy can include the following attribute in a JSON block:
Fields The array of field indexes to create. It must contain at least one field index.
The following is an example of an index policy document that creates two indexes, RequestId and TransactionId.
\"policyDocument\": \"{ \\\"Fields\\\": [ \\\"RequestId\\\", \\\"TransactionId\\\" ] }\"
Use this parameter to apply the subscription filter policy to a subset of log groups in the account. Currently, the only supported filter is LogGroupName NOT IN []. The selectionCriteria string can be up to 25 KB in length. The length is determined by using its UTF-8 bytes.
Using the selectionCriteria parameter is useful to help prevent infinite loops. For more information, see Log recursion prevention.
Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY for policyType.
Use this parameter to apply the new policy to a subset of log groups in the account.
Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY, or TRANSFORMER_POLICY for policyType.
If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN [].
If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix.
The selectionCriteria string can be up to 25 KB in length. The length is determined by using its UTF-8 bytes.
Using the selectionCriteria parameter with SUBSCRIPTION_FILTER_POLICY is useful to help prevent infinite loops. For more information, see Log recursion prevention.
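For example, the documented LogGroupName NOT IN [] filter can keep a delivery log group out of an account-wide subscription filter policy (a sketch; the ARNs and log group name are placeholders):

```python
import json
import boto3

logs = boto3.client("logs")

policy = {
    "DestinationArn": "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream",
    "RoleArn": "arn:aws:iam::123456789012:role/example-cwl-role",
    "FilterPattern": "",
}

# Excluding the destination's own log group prevents the policy from
# re-subscribing to events it delivered (see Log recursion prevention).
logs.put_account_policy(
    policyName="account-subscription-policy",
    policyType="SUBSCRIPTION_FILTER_POLICY",
    policyDocument=json.dumps(policy),
    selectionCriteria='LogGroupName NOT IN ["delivery-log-group"]',
)
```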
Specify either the log group name or log group ARN to apply this field index policy to. If you specify an ARN, use the format arn:aws:logs:region:account-id:log-group:log_group_name. Don't include an * at the end.
" + }, + "policyDocument":{ + "shape":"PolicyDocument", + "documentation":"The index policy document, in JSON format. The following is an example of an index policy document that creates two indexes, RequestId
and TransactionId
.
\"policyDocument\": \"{ \"Fields\": [ \"RequestId\", \"TransactionId\" ] }\"
The policy document must include at least one field index. For more information about the fields that can be included and other restrictions, see Field index syntax and quotas.
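A minimal sketch of the corresponding boto3 call, assuming the request parameter follows the logGroupIdentifier naming used elsewhere in this model and a placeholder log group name:

```python
import json
import boto3

logs = boto3.client("logs")

# Create or update the field index policy for a single log group.
response = logs.put_index_policy(
    logGroupIdentifier="my-log-group",  # name or ARN, with no trailing *
    policyDocument=json.dumps({"Fields": ["RequestId", "TransactionId"]}),
)
print(response["indexPolicy"])
```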
" + } + } + }, + "PutIndexPolicyResponse":{ + "type":"structure", + "members":{ + "indexPolicy":{ + "shape":"IndexPolicy", + "documentation":"The index policy that you just created or updated.
" + } + } + }, "PutLogEventsRequest":{ "type":"structure", "required":[ @@ -4208,7 +5154,7 @@ }, "entity":{ "shape":"Entity", - "documentation":"Reserved for internal use.
" + "documentation":"The entity associated with the log events.
" } } }, @@ -4225,7 +5171,7 @@ }, "rejectedEntityInfo":{ "shape":"RejectedEntityInfo", - "documentation":"Reserved for internal use.
" + "documentation":"Information about why the entity is rejected when calling PutLogEvents
. Only returned when the entity is rejected.
When the entity is rejected, the events may still be accepted.
A collection of information that defines how metric data gets emitted.
" + }, + "applyOnTransformedLogs":{ + "shape":"ApplyOnTransformedLogs", + "documentation":"This parameter is valid only for log groups that have an active log transformer. For more information about log transformers, see PutTransformer.
If the log group uses either a log-group level or account-level transformer, and you specify true, the metric filter will be applied on the transformed version of the log events instead of the original ingested log events.
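A sketch of setting the new flag on a metric filter, with placeholder names:

```python
import boto3

logs = boto3.client("logs")

# Count ERROR occurrences in the transformed events rather than the raw
# ingested ones; the log group needs an active transformer for this to apply.
logs.put_metric_filter(
    logGroupName="my-log-group",
    filterName="error-count",
    filterPattern="ERROR",
    metricTransformations=[{
        "metricName": "ErrorCount",
        "metricNamespace": "MyApp",
        "metricValue": "1",
    }],
    applyOnTransformedLogs=True,
)
```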
The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to random for a more even distribution. This property is only applicable when the destination is an Amazon Kinesis data stream.
" + }, + "applyOnTransformedLogs":{ + "shape":"ApplyOnTransformedLogs", + "documentation":"This parameter is valid only for log groups that have an active log transformer. For more information about log transformers, see PutTransformer.
If the log group uses either a log-group level or account-level transformer, and you specify true, the subscription filter will be applied on the transformed version of the log events instead of the original ingested log events.
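The equivalent sketch for a subscription filter, again with placeholder ARNs:

```python
import boto3

logs = boto3.client("logs")

# Deliver the transformed version of every event to a Kinesis stream.
logs.put_subscription_filter(
    logGroupName="my-log-group",
    filterName="to-kinesis",
    filterPattern="",  # empty pattern matches all events
    destinationArn="arn:aws:kinesis:us-east-1:123456789012:stream/example-stream",
    roleArn="arn:aws:iam::123456789012:role/example-cwl-role",
    applyOnTransformedLogs=True,
)
```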
Specify either the name or ARN of the log group to create the transformer for.
" + }, + "transformerConfig":{ + "shape":"Processors", + "documentation":"This structure contains the configuration of this log transformer. A log transformer is an array of processors, where each processor applies one type of transformation to the log events that are ingested.
" } } }, @@ -4490,12 +5461,24 @@ "shape":"StatsValue", "documentation":"The total number of log events scanned during the query.
" }, + "estimatedRecordsSkipped":{ + "shape":"StatsValue", + "documentation":"An estimate of the number of log events that were skipped when processing this query, because the query contained an indexed field. Skipping these entries lowers query costs and improves the query performance time. For more information about field indexes, see PutIndexPolicy.
" + }, "bytesScanned":{ "shape":"StatsValue", "documentation":"The total number of bytes in the log events scanned during the query.
" + }, + "estimatedBytesSkipped":{ + "shape":"StatsValue", + "documentation":"An estimate of the number of bytes in the log events that were skipped when processing this query, because the query contained an indexed field. Skipping these entries lowers query costs and improves the query performance time. For more information about field indexes, see PutIndexPolicy.
" + }, + "logGroupsScanned":{ + "shape":"StatsValue", + "documentation":"The number of log groups that were scanned by this query.
" } }, - "documentation":"Contains the number of log events scanned by the query, the number of log events that matched the query criteria, and the total number of bytes in the log events that were scanned.
" + "documentation":"Contains the number of log events scanned by the query, the number of log events that matched the query criteria, and the total number of bytes in the log events that were scanned.
If the query involved log groups that have field index policies, the estimated number of skipped log events and the total bytes of those skipped log events are included. Using field indexes to skip log events in queries reduces scan volume and improves performance. For more information, see Create field indexes to improve query performance and reduce scan volume.
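For example (a sketch; the query ID comes from a previous StartQuery call), the new skip counters appear alongside the existing statistics:

```python
import boto3

logs = boto3.client("logs")

query_id = "..."  # placeholder: returned by start_query
stats = logs.get_query_results(queryId=query_id)["statistics"]
print(stats.get("recordsScanned"), stats.get("estimatedRecordsSkipped"))
print(stats.get("bytesScanned"), stats.get("estimatedBytesSkipped"))
print(stats.get("logGroupsScanned"))
```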
" }, "QueryStatus":{ "type":"string", @@ -4514,6 +5497,11 @@ "max":10000, "min":0 }, + "QuoteCharacter":{ + "type":"string", + "max":1, + "min":1 + }, "RecordField":{ "type":"structure", "members":{ @@ -4540,10 +5528,10 @@ "members":{ "errorType":{ "shape":"EntityRejectionErrorType", - "documentation":"Reserved for internal use.
" + "documentation":"The type of error that caused the rejection of the entity when calling PutLogEvents
.
Reserved for internal use.
" + "documentation":"If an entity is rejected when a PutLogEvents
request was made, this includes details about the reason for the rejection.
Represents the rejected events.
" }, + "RenameKeyEntries":{ + "type":"list", + "member":{"shape":"RenameKeyEntry"}, + "max":5, + "min":1 + }, + "RenameKeyEntry":{ + "type":"structure", + "required":[ + "key", + "renameTo" + ], + "members":{ + "key":{ + "shape":"Key", + "documentation":"The key to rename
" + }, + "renameTo":{ + "shape":"RenameTo", + "documentation":"The string to use for the new key name
" + }, + "overwriteIfExists":{ + "shape":"OverwriteIfExists", + "documentation":"Specifies whether to overwrite the existing value if the destination key already exists. The default is false
This object defines one key that will be renamed with the renameKeys processor.
" + }, + "RenameKeys":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"RenameKeyEntries", + "documentation":"An array of RenameKeyEntry
objects, where each object contains the information about a single key to rename.
Use this processor to rename keys in a log event.
For more information about this processor including examples, see renameKeys in the CloudWatch Logs User Guide.
" + }, + "RenameTo":{ + "type":"string", + "max":128, + "min":1 + }, "RequestId":{ "type":"string", "max":256, @@ -4732,6 +5764,50 @@ "documentation":"This exception is returned in a Live Tail stream when the Live Tail session times out. Live Tail sessions time out after three hours.
", "exception":true }, + "Source":{ + "type":"string", + "max":128, + "min":1 + }, + "SourceTimezone":{ + "type":"string", + "min":1 + }, + "SplitString":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"SplitStringEntries", + "documentation":"An array of SplitStringEntry
objects, where each object contains the information about one field to split.
Use this processor to split a field into an array of strings using a delimiting character.
For more information about this processor including examples, see splitString in the CloudWatch Logs User Guide.
" + }, + "SplitStringEntries":{ + "type":"list", + "member":{"shape":"SplitStringEntry"}, + "max":10, + "min":1 + }, + "SplitStringEntry":{ + "type":"structure", + "required":[ + "source", + "delimiter" + ], + "members":{ + "source":{ + "shape":"Source", + "documentation":"The key of the field to split.
" + }, + "delimiter":{ + "shape":"Delimiter", + "documentation":"The separator characters to split the string entry on.
" + } + }, + "documentation":"This object defines one log field that will be split with the splitString processor.
" + }, "StandardUnit":{ "type":"string", "enum":[ @@ -4835,15 +5911,15 @@ "members":{ "logGroupName":{ "shape":"LogGroupName", - "documentation":"The log group on which to perform the query.
A StartQuery operation must include exactly one of the following parameters: logGroupName, logGroupNames, or logGroupIdentifiers.
The log group on which to perform the query.
" }, "logGroupNames":{ "shape":"LogGroupNames", - "documentation":"The list of log groups to be queried. You can include up to 50 log groups.
A StartQuery operation must include exactly one of the following parameters: logGroupName, logGroupNames, or logGroupIdentifiers.
The list of log groups to be queried. You can include up to 50 log groups.
" }, "logGroupIdentifiers":{ "shape":"LogGroupIdentifiers", - "documentation":"The list of log groups to query. You can include up to 50 log groups.
You can specify them by the log group name or ARN. If a log group that you're querying is in a source account and you're using a monitoring account, you must specify the ARN of the log group here. The query definition must also be defined in the monitoring account.
If you specify an ARN, the ARN can't end with an asterisk (*).
A StartQuery operation must include exactly one of the following parameters: logGroupName, logGroupNames, or logGroupIdentifiers.
The list of log groups to query. You can include up to 50 log groups.
You can specify them by the log group name or ARN. If a log group that you're querying is in a source account and you're using a monitoring account, you must specify the ARN of the log group here. The query definition must also be defined in the monitoring account.
If you specify an ARN, use the format arn:aws:logs:region:account-id:log-group:log_group_name. Don't include an * at the end.
A StartQuery operation must include exactly one of the following parameters: logGroupName, logGroupNames, or logGroupIdentifiers.
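A sketch of the logGroupIdentifiers form, with a placeholder ARN:

```python
import time
import boto3

logs = boto3.client("logs")

# Cross-account style: identify the log group by ARN (no trailing *).
query_id = logs.start_query(
    logGroupIdentifiers=["arn:aws:logs:us-east-1:123456789012:log-group:my-log-group"],
    startTime=int(time.time()) - 3600,
    endTime=int(time.time()),
    queryString="fields @timestamp, @message | limit 10",
)["queryId"]
```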
This parameter is valid only for log groups that have an active log transformer. For more information about log transformers, see PutTransformer.
If this value is true, the subscription filter is applied on the transformed version of the log events instead of the original ingested log events.
The creation time of the subscription filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
An array of objects, where each object contains the information about one key to match and replace.
" + } + }, + "documentation":"This processor matches a key’s value against a regular expression and replaces all matches with a replacement string.
For more information about this processor including examples, see substituteString in the CloudWatch Logs User Guide.
" + }, + "SubstituteStringEntries":{ + "type":"list", + "member":{"shape":"SubstituteStringEntry"}, + "max":10, + "min":1 + }, + "SubstituteStringEntry":{ + "type":"structure", + "required":[ + "source", + "from", + "to" + ], + "members":{ + "source":{ + "shape":"Source", + "documentation":"The key to modify
" + }, + "from":{ + "shape":"FromKey", + "documentation":"The regular expression string to be replaced. Special regex characters such as [ and ] must be escaped using \\\\ when using double quotes and with \\ when using single quotes. For more information, see Class Pattern on the Oracle web site.
" + }, + "to":{ + "shape":"ToKey", + "documentation":"The string to be substituted for each match of from
This object defines one log field key that will be replaced using the substituteString processor.
" + }, "Success":{"type":"boolean"}, "SuppressionPeriod":{ "type":"structure", @@ -5038,10 +6158,24 @@ "max":50, "min":1 }, + "Target":{ + "type":"string", + "max":128, + "min":1 + }, "TargetArn":{ "type":"string", "min":1 }, + "TargetFormat":{ + "type":"string", + "max":64, + "min":1 + }, + "TargetTimezone":{ + "type":"string", + "min":1 + }, "TestEventMessages":{ "type":"list", "member":{"shape":"EventMessage"}, @@ -5071,6 +6205,32 @@ } } }, + "TestTransformerRequest":{ + "type":"structure", + "required":[ + "transformerConfig", + "logEventMessages" + ], + "members":{ + "transformerConfig":{ + "shape":"Processors", + "documentation":"This structure contains the configuration of this log transformer that you want to test. A log transformer is an array of processors, where each processor applies one type of transformation to the log events that are ingested.
" + }, + "logEventMessages":{ + "shape":"TestEventMessages", + "documentation":"An array of the raw log events that you want to use to test this transformer.
" + } + } + }, + "TestTransformerResponse":{ + "type":"structure", + "members":{ + "transformedLogs":{ + "shape":"TransformedLogs", + "documentation":"An array where each member of the array includes both the original version and the transformed version of one of the log events that you input.
" + } + } + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -5086,6 +6246,11 @@ "type":"long", "min":0 }, + "ToKey":{ + "type":"string", + "max":128, + "min":1 + }, "Token":{"type":"string"}, "TokenString":{ "type":"string", @@ -5104,6 +6269,93 @@ "documentation":"A resource can have no more than 50 tags.
", "exception":true }, + "TransformedEventMessage":{ + "type":"string", + "min":1 + }, + "TransformedLogRecord":{ + "type":"structure", + "members":{ + "eventNumber":{ + "shape":"EventNumber", + "documentation":"The event number.
" + }, + "eventMessage":{ + "shape":"EventMessage", + "documentation":"The original log event message before it was transformed.
" + }, + "transformedEventMessage":{ + "shape":"TransformedEventMessage", + "documentation":"The log event message after being transformed.
" + } + }, + "documentation":"This structure contains information for one log event that has been processed by a log transformer.
" + }, + "TransformedLogs":{ + "type":"list", + "member":{"shape":"TransformedLogRecord"} + }, + "TrimString":{ + "type":"structure", + "required":["withKeys"], + "members":{ + "withKeys":{ + "shape":"TrimStringWithKeys", + "documentation":"The array containing the keys of the fields to trim.
" + } + }, + "documentation":"Use this processor to remove leading and trailing whitespace.
For more information about this processor including examples, see trimString in the CloudWatch Logs User Guide.
" + }, + "TrimStringWithKeys":{ + "type":"list", + "member":{"shape":"WithKey"}, + "max":10, + "min":1 + }, + "Type":{ + "type":"string", + "enum":[ + "boolean", + "integer", + "double", + "string" + ] + }, + "TypeConverter":{ + "type":"structure", + "required":["entries"], + "members":{ + "entries":{ + "shape":"TypeConverterEntries", + "documentation":"An array of TypeConverterEntry
objects, where each object contains the information about one field to change the type of.
Use this processor to convert a value type associated with the specified key to the specified type. It's a casting processor that changes the types of the specified fields. Values can be converted into one of the following datatypes: integer
, double
, string
and boolean
.
For more information about this processor including examples, see trimString in the CloudWatch Logs User Guide.
" + }, + "TypeConverterEntries":{ + "type":"list", + "member":{"shape":"TypeConverterEntry"}, + "max":5, + "min":1 + }, + "TypeConverterEntry":{ + "type":"structure", + "required":[ + "key", + "type" + ], + "members":{ + "key":{ + "shape":"Key", + "documentation":"The key with the value that is to be converted to a different type.
" + }, + "type":{ + "shape":"Type", + "documentation":"The type to convert the field value to. Valid values are integer
, double
, string
and boolean
.
This object defines one value type that will be converted using the typeConverter processor.
" + }, "Unmask":{"type":"boolean"}, "UnrecognizedClientException":{ "type":"structure", @@ -5231,6 +6483,23 @@ } } }, + "UpperCaseString":{ + "type":"structure", + "required":["withKeys"], + "members":{ + "withKeys":{ + "shape":"UpperCaseStringWithKeys", + "documentation":"The array of containing the keys of the field to convert to uppercase.
" + } + }, + "documentation":"This processor converts a string field to uppercase.
For more information about this processor including examples, see upperCaseString in the CloudWatch Logs User Guide.
" + }, + "UpperCaseStringWithKeys":{ + "type":"list", + "member":{"shape":"WithKey"}, + "max":10, + "min":1 + }, "ValidationException":{ "type":"structure", "members":{ @@ -5238,7 +6507,16 @@ "documentation":"One of the parameters for the request is not valid.
", "exception":true }, - "Value":{"type":"string"} + "Value":{"type":"string"}, + "ValueKey":{ + "type":"string", + "max":128, + "min":1 + }, + "WithKey":{ + "type":"string", + "min":1 + } }, "documentation":"You can use Amazon CloudWatch Logs to monitor, store, and access your log files from EC2 instances, CloudTrail, and other sources. You can then retrieve the associated log data from CloudWatch Logs using the CloudWatch console. Alternatively, you can use CloudWatch Logs commands in the Amazon Web Services CLI, CloudWatch Logs API, or CloudWatch Logs SDK.
You can use CloudWatch Logs to:
Monitor logs from EC2 instances in real time: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs. Then, it can send you a notification whenever the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses your log data for monitoring so no code changes are required. For example, you can monitor application logs for specific literal terms (such as \"NullReferenceException\"). You can also count the number of occurrences of a literal term at a particular position in log data (such as \"404\" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch metric that you specify.
Monitor CloudTrail logged events: You can create alarms in CloudWatch and receive notifications of particular API activity as captured by CloudTrail. You can use the notification to perform troubleshooting.
Archive log data: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events earlier than this setting are automatically deleted. The CloudWatch Logs agent helps to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it.
Associates a delivery Channel with a particular NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts).
", + "idempotent":true + }, + "CreateEventRule":{ + "name":"CreateEventRule", + "http":{ + "method":"POST", + "requestUri":"/event-rules", + "responseCode":201 + }, + "input":{"shape":"CreateEventRuleRequest"}, + "output":{"shape":"CreateEventRuleResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Creates an EventRule that is associated with a specified Notification Configuration.
", + "idempotent":true + }, + "CreateNotificationConfiguration":{ + "name":"CreateNotificationConfiguration", + "http":{ + "method":"POST", + "requestUri":"/notification-configurations", + "responseCode":201 + }, + "input":{"shape":"CreateNotificationConfigurationRequest"}, + "output":{"shape":"CreateNotificationConfigurationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates a new NotificationConfiguration.
", + "idempotent":true + }, + "DeleteEventRule":{ + "name":"DeleteEventRule", + "http":{ + "method":"DELETE", + "requestUri":"/event-rules/{arn}", + "responseCode":200 + }, + "input":{"shape":"DeleteEventRuleRequest"}, + "output":{"shape":"DeleteEventRuleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Deletes an EventRule.
", + "idempotent":true + }, + "DeleteNotificationConfiguration":{ + "name":"DeleteNotificationConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/notification-configurations/{arn}", + "responseCode":200 + }, + "input":{"shape":"DeleteNotificationConfigurationRequest"}, + "output":{"shape":"DeleteNotificationConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Deletes a NotificationConfiguration.
", + "idempotent":true + }, + "DeregisterNotificationHub":{ + "name":"DeregisterNotificationHub", + "http":{ + "method":"DELETE", + "requestUri":"/notification-hubs/{notificationHubRegion}", + "responseCode":200 + }, + "input":{"shape":"DeregisterNotificationHubRequest"}, + "output":{"shape":"DeregisterNotificationHubResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Deregisters a NotificationHub in the specified Region.
You can't deregister the last NotificationHub in the account. NotificationEvents stored in the deregistered NotificationHub are no longer be visible. Recreating a new NotificationHub in the same Region restores access to those NotificationEvents.
Disassociates a Channel from a specified NotificationConfiguration. Supported Channels include AWS Chatbot, the AWS Console Mobile Application, and emails (notifications-contacts).
", + "idempotent":true + }, + "GetEventRule":{ + "name":"GetEventRule", + "http":{ + "method":"GET", + "requestUri":"/event-rules/{arn}", + "responseCode":200 + }, + "input":{"shape":"GetEventRuleRequest"}, + "output":{"shape":"GetEventRuleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns a specified EventRule.
" + }, + "GetNotificationConfiguration":{ + "name":"GetNotificationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/notification-configurations/{arn}", + "responseCode":200 + }, + "input":{"shape":"GetNotificationConfigurationRequest"}, + "output":{"shape":"GetNotificationConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns a specified NotificationConfiguration.
" + }, + "GetNotificationEvent":{ + "name":"GetNotificationEvent", + "http":{ + "method":"GET", + "requestUri":"/notification-events/{arn}", + "responseCode":200 + }, + "input":{"shape":"GetNotificationEventRequest"}, + "output":{"shape":"GetNotificationEventResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns a specified NotificationEvent.
User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. GetNotificationEvent only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the AWS User Notifications User Guide.
Returns a list of Channels for a NotificationConfiguration.
" + }, + "ListEventRules":{ + "name":"ListEventRules", + "http":{ + "method":"GET", + "requestUri":"/event-rules", + "responseCode":200 + }, + "input":{"shape":"ListEventRulesRequest"}, + "output":{"shape":"ListEventRulesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns a list of EventRules according to specified filters, in reverse chronological order (newest first).
" + }, + "ListNotificationConfigurations":{ + "name":"ListNotificationConfigurations", + "http":{ + "method":"GET", + "requestUri":"/notification-configurations", + "responseCode":200 + }, + "input":{"shape":"ListNotificationConfigurationsRequest"}, + "output":{"shape":"ListNotificationConfigurationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Returns a list of abbreviated NotificationConfigurations according to specified filters, in reverse chronological order (newest first).
" + }, + "ListNotificationEvents":{ + "name":"ListNotificationEvents", + "http":{ + "method":"GET", + "requestUri":"/notification-events", + "responseCode":200 + }, + "input":{"shape":"ListNotificationEventsRequest"}, + "output":{"shape":"ListNotificationEventsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Returns a list of NotificationEvents according to specified filters, in reverse chronological order (newest first).
User Notifications stores notifications in the individual Regions you register as notification hubs and the Region of the source event rule. ListNotificationEvents only returns notifications stored in the same Region in which the action is called. User Notifications doesn't backfill notifications to new Regions selected as notification hubs. For this reason, we recommend that you make calls in your oldest registered notification hub. For more information, see Notification hubs in the AWS User Notifications User Guide.
Returns a list of NotificationHubs.
" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{arn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns a list of tags for a specified Amazon Resource Name (ARN).
For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.
This is only supported for NotificationConfigurations.
Registers a NotificationHub in the specified Region.
There is a maximum of one NotificationHub per Region. You can have a maximum of 3 NotificationHubs at a time.
", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{arn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Tags the resource with a tag key and value.
For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.
This is only supported for NotificationConfigurations.
Untags a resource with a specified Amazon Resource Name (ARN).
For more information, see Tagging your AWS resources in the Tagging AWS Resources User Guide.
", + "idempotent":true + }, + "UpdateEventRule":{ + "name":"UpdateEventRule", + "http":{ + "method":"PUT", + "requestUri":"/event-rules/{arn}", + "responseCode":200 + }, + "input":{"shape":"UpdateEventRuleRequest"}, + "output":{"shape":"UpdateEventRuleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Updates an existing EventRule.
", + "idempotent":true + }, + "UpdateNotificationConfiguration":{ + "name":"UpdateNotificationConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/notification-configurations/{arn}", + "responseCode":200 + }, + "input":{"shape":"UpdateNotificationConfigurationRequest"}, + "output":{"shape":"UpdateNotificationConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Updates a NotificationConfiguration.
", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"User does not have sufficient access to perform this action.
", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountId":{ + "type":"string", + "pattern":"\\d{12}" + }, + "AggregationDuration":{ + "type":"string", + "enum":[ + "LONG", + "SHORT", + "NONE" + ] + }, + "AggregationEventType":{ + "type":"string", + "enum":[ + "AGGREGATE", + "CHILD", + "NONE" + ] + }, + "Arn":{ + "type":"string", + "pattern":"arn:[^:]*:[^:]*:[^:]*:.*" + }, + "AssociateChannelRequest":{ + "type":"structure", + "required":[ + "arn", + "notificationConfigurationArn" + ], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"The Amazon Resource Name (ARN) of the Channel to associate with the NotificationConfiguration.
Supported ARNs include AWS Chatbot, the Console Mobile Application, and notifications-contacts.
", + "location":"uri", + "locationName":"arn" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of the NotificationConfiguration to associate with the Channel.
" + } + } + }, + "AssociateChannelResponse":{ + "type":"structure", + "members":{ + } + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ChannelArn":{ + "type":"string", + "pattern":"arn:aws:(chatbot|consoleapp|notifications-contacts):[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-_.@]+/[a-zA-Z0-9/_.@:-]+" + }, + "Channels":{ + "type":"list", + "member":{"shape":"ChannelArn"} + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The resource ID that prompted the conflict error.
" + } + }, + "documentation":"Updating or deleting a resource can cause an inconsistent state.
", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateEventRuleRequest":{ + "type":"structure", + "required":[ + "notificationConfigurationArn", + "source", + "eventType", + "regions" + ], + "members":{ + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration associated with this EventRule.
" + }, + "source":{ + "shape":"Source", + "documentation":"The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + }, + "eventPattern":{ + "shape":"EventRuleEventPattern", + "documentation":"An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + }, + "regions":{ + "shape":"Regions", + "documentation":"A list of AWS Regions that send events to this EventRule.
" + } + } + }, + "CreateEventRuleResponse":{ + "type":"structure", + "required":[ + "arn", + "notificationConfigurationArn", + "statusSummaryByRegion" + ], + "members":{ + "arn":{ + "shape":"EventRuleArn", + "documentation":"The ARN of the resource.
" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of a NotificationConfiguration.
" + }, + "statusSummaryByRegion":{ + "shape":"StatusSummaryByRegion", + "documentation":"A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.
" + } + } + }, + "CreateNotificationConfigurationRequest":{ + "type":"structure", + "required":[ + "name", + "description" + ], + "members":{ + "name":{ + "shape":"NotificationConfigurationName", + "documentation":"The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.
" + }, + "description":{ + "shape":"NotificationConfigurationDescription", + "documentation":"The description of the NotificationConfiguration.
" + }, + "aggregationDuration":{ + "shape":"AggregationDuration", + "documentation":"The aggregation preference of the NotificationConfiguration.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
No delay in delivery.
A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs.
" + } + } + }, + "CreateNotificationConfigurationResponse":{ + "type":"structure", + "required":[ + "arn", + "status" + ], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the the resource.
" + }, + "status":{ + "shape":"NotificationConfigurationStatus", + "documentation":"The status of this NotificationConfiguration.
The status should always be INACTIVE
when part of the CreateNotificationConfiguration response.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted.
Only GET
and LIST
calls can be run.
The Amazon Resource Name (ARN) of the EventRule to delete.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "DeleteEventRuleResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteNotificationConfigurationRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration to delete.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "DeleteNotificationConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeregisterNotificationHubRequest":{ + "type":"structure", + "required":["notificationHubRegion"], + "members":{ + "notificationHubRegion":{ + "shape":"Region", + "documentation":"The NotificationHub Region.
", + "location":"uri", + "locationName":"notificationHubRegion" + } + } + }, + "DeregisterNotificationHubResponse":{ + "type":"structure", + "required":[ + "notificationHubRegion", + "statusSummary" + ], + "members":{ + "notificationHubRegion":{ + "shape":"Region", + "documentation":"The NotificationHub Region.
" + }, + "statusSummary":{ + "shape":"NotificationHubStatusSummary", + "documentation":"NotificationHub status information.
" + } + } + }, + "Dimension":{ + "type":"structure", + "required":[ + "name", + "value" + ], + "members":{ + "name":{ + "shape":"TextPartReference", + "documentation":"The name of the dimension
" + }, + "value":{ + "shape":"TextPartReference", + "documentation":"The value of the dimension.
" + } + }, + "documentation":"The key-value pair of properties for an event.
" + }, + "Dimensions":{ + "type":"list", + "member":{"shape":"Dimension"}, + "max":10, + "min":0 + }, + "DisassociateChannelRequest":{ + "type":"structure", + "required":[ + "arn", + "notificationConfigurationArn" + ], + "members":{ + "arn":{ + "shape":"ChannelArn", + "documentation":"The Amazon Resource Name (ARN) of the Channel to disassociate.
", + "location":"uri", + "locationName":"arn" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of the NotificationConfiguration to disassociate.
" + } + } + }, + "DisassociateChannelResponse":{ + "type":"structure", + "members":{ + } + }, + "ErrorMessage":{"type":"string"}, + "EventRuleArn":{ + "type":"string", + "pattern":"arn:aws:notifications::[0-9]{12}:configuration/[a-z0-9]{27}/rule/[a-z0-9]{27}" + }, + "EventRuleEventPattern":{ + "type":"string", + "max":4096, + "min":0 + }, + "EventRuleStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "CREATING", + "UPDATING", + "DELETING" + ] + }, + "EventRuleStatusReason":{"type":"string"}, + "EventRuleStatusSummary":{ + "type":"structure", + "required":[ + "status", + "reason" + ], + "members":{ + "status":{ + "shape":"EventRuleStatus", + "documentation":"The status of the EventRule.
Values:
ACTIVE
The EventRule can process events.
INACTIVE
The EventRule may be unable to process events.
CREATING
The EventRule is being created.
Only GET
and LIST
calls can be run.
UPDATING
The EventRule is being updated.
Only GET
and LIST
calls can be run.
DELETING
The EventRule is being deleted.
Only GET
and LIST
calls can be run.
A human-readable reason for EventRuleStatus.
" + } + }, + "documentation":"Describes EventRule status information.
" + }, + "EventRuleStructure":{ + "type":"structure", + "required":[ + "arn", + "notificationConfigurationArn", + "creationTime", + "source", + "eventType", + "eventPattern", + "regions", + "managedRules", + "statusSummaryByRegion" + ], + "members":{ + "arn":{ + "shape":"EventRuleArn", + "documentation":"The Amazon Resource Name (ARN) of the resource.
" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN for the NotificationConfiguration associated with this EventRule.
" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"The creation time of the resource.
" + }, + "source":{ + "shape":"Source", + "documentation":"The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + }, + "eventPattern":{ + "shape":"EventRuleEventPattern", + "documentation":"An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + }, + "regions":{ + "shape":"Regions", + "documentation":"A list of AWS Regions that send events to this EventRule.
" + }, + "managedRules":{ + "shape":"ManagedRuleArns", + "documentation":"A list of Amazon EventBridge Managed Rule ARNs associated with this EventRule.
These are created by AWS User Notifications within your account so your EventRules can function.
A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.
" + } + }, + "documentation":"Contains a complete list of fields related to an EventRule.
" + }, + "EventRules":{ + "type":"list", + "member":{"shape":"EventRuleStructure"} + }, + "EventStatus":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY" + ] + }, + "EventType":{ + "type":"string", + "max":128, + "min":1, + "pattern":"([a-zA-Z0-9 \\-\\(\\)])+" + }, + "GetEventRuleRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"EventRuleArn", + "documentation":"The Amazon Resource Name (ARN) of the EventRule to return.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "GetEventRuleResponse":{ + "type":"structure", + "required":[ + "arn", + "notificationConfigurationArn", + "creationTime", + "source", + "eventType", + "eventPattern", + "regions", + "managedRules", + "statusSummaryByRegion" + ], + "members":{ + "arn":{ + "shape":"EventRuleArn", + "documentation":"The ARN of the resource.
" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of a NotificationConfiguration.
" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"The date when the EventRule was created.
" + }, + "source":{ + "shape":"Source", + "documentation":"The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The event type to match.
Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + }, + "eventPattern":{ + "shape":"EventRuleEventPattern", + "documentation":"An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + }, + "regions":{ + "shape":"Regions", + "documentation":"A list of AWS Regions that send events to this EventRule.
" + }, + "managedRules":{ + "shape":"ManagedRuleArns", + "documentation":"A list of managed rules from EventBridge that are are associated with this EventRule.
These are created by AWS User Notifications within your account so this EventRule functions.
A list of an EventRule's status by Region. Regions are mapped to EventRuleStatusSummary.
" + } + } + }, + "GetNotificationConfigurationRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration to return.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "GetNotificationConfigurationResponse":{ + "type":"structure", + "required":[ + "arn", + "name", + "description", + "status", + "creationTime" + ], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of the resource.
" + }, + "name":{ + "shape":"NotificationConfigurationName", + "documentation":"The name of the NotificationConfiguration.
" + }, + "description":{ + "shape":"NotificationConfigurationDescription", + "documentation":"The description of the NotificationConfiguration.
" + }, + "status":{ + "shape":"NotificationConfigurationStatus", + "documentation":"The status of this NotificationConfiguration.
The status should always be INACTIVE
when part of the CreateNotificationConfiguration response.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted. Only GET
and LIST
calls can be run.
Only GET
and LIST
calls can be run.
The creation time of the NotificationConfiguration.
" + }, + "aggregationDuration":{ + "shape":"AggregationDuration", + "documentation":"The aggregation preference of the NotificationConfiguration.
Values:
LONG
Aggregate notifications for long periods of time (12 hours).
SHORT
Aggregate notifications for short periods of time (5 minutes).
NONE
Don't aggregate notifications.
No delay in delivery.
The Amazon Resource Name (ARN) of the NotificationEvent to return.
", + "location":"uri", + "locationName":"arn" + }, + "locale":{ + "shape":"LocaleCode", + "documentation":"The locale code of the language used for the retrieved NotificationEvent. The default locale is English en_US
.
The ARN of the resource.
" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of the NotificationConfiguration.
" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"The creation time of the NotificationEvent.
" + }, + "content":{ + "shape":"NotificationEvent", + "documentation":"The content of the NotificationEvent.
" + } + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"Unexpected error during processing of request.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "LastActivationTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ListChannelsRequest":{ + "type":"structure", + "required":["notificationConfigurationArn"], + "members":{ + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration.
", + "location":"querystring", + "locationName":"notificationConfigurationArn" + }, + "maxResults":{ + "shape":"ListChannelsRequestMaxResultsInteger", + "documentation":"The maximum number of results to be returned in this call. The default value is 20.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListNotificationEvents call. NextToken uses Base64 encoding.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListChannelsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListChannelsResponse":{ + "type":"structure", + "required":["channels"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "channels":{ + "shape":"Channels", + "documentation":"A list of Channels.
" + } + } + }, + "ListEventRulesRequest":{ + "type":"structure", + "required":["notificationConfigurationArn"], + "members":{ + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the NotificationConfiguration.
", + "location":"querystring", + "locationName":"notificationConfigurationArn" + }, + "maxResults":{ + "shape":"ListEventRulesRequestMaxResultsInteger", + "documentation":"The maximum number of results to be returned in this call. The default value is 20.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListEventRulesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListEventRulesResponse":{ + "type":"structure", + "required":["eventRules"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "eventRules":{ + "shape":"EventRules", + "documentation":"A list of EventRules.
" + } + } + }, + "ListNotificationConfigurationsRequest":{ + "type":"structure", + "members":{ + "eventRuleSource":{ + "shape":"Source", + "documentation":"The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
The Amazon Resource Name (ARN) of the Channel to match.
", + "location":"querystring", + "locationName":"channelArn" + }, + "status":{ + "shape":"NotificationConfigurationStatus", + "documentation":"The NotificationConfiguration status to match.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted.
Only GET
and LIST
calls can be run.
The maximum number of results to be returned in this call. Defaults to 20.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListNotificationConfigurationsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListNotificationConfigurationsResponse":{ + "type":"structure", + "required":["notificationConfigurations"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "notificationConfigurations":{ + "shape":"NotificationConfigurations", + "documentation":"The NotificationConfigurations in the account.
" + } + } + }, + "ListNotificationEventsRequest":{ + "type":"structure", + "members":{ + "startTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The earliest time of events to return from this call.
", + "location":"querystring", + "locationName":"startTime" + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"Latest time of events to return from this call.
", + "location":"querystring", + "locationName":"endTime" + }, + "locale":{ + "shape":"LocaleCode", + "documentation":"The locale code of the language used for the retrieved NotificationEvent. The default locale is English (en_US)
.
The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2
and aws.cloudwatch
. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
Include aggregated child events in the result.
", + "location":"querystring", + "locationName":"includeChildEvents" + }, + "aggregateNotificationEventArn":{ + "shape":"NotificationEventArn", + "documentation":"The Amazon Resource Name (ARN) of the aggregatedNotificationEventArn to match.
", + "location":"querystring", + "locationName":"aggregateNotificationEventArn" + }, + "maxResults":{ + "shape":"ListNotificationEventsRequestMaxResultsInteger", + "documentation":"The maximum number of results to be returned in this call. Defaults to 20.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The start token for paginated calls. Retrieved from the response of a previous ListEventRules call. Next token uses Base64 encoding.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListNotificationEventsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListNotificationEventsResponse":{ + "type":"structure", + "required":["notificationEvents"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + }, + "notificationEvents":{ + "shape":"NotificationEvents", + "documentation":"The list of notification events.
" + } + } + }, + "ListNotificationHubsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListNotificationHubsRequestMaxResultsInteger", + "documentation":"The maximum number of records to list in a single response.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. Set to null to start listing notification hubs from the start.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListNotificationHubsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":3, + "min":3 + }, + "ListNotificationHubsResponse":{ + "type":"structure", + "required":["notificationHubs"], + "members":{ + "notificationHubs":{ + "shape":"NotificationHubs", + "documentation":"The NotificationHubs in the account.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"A pagination token. If a non-null pagination token is returned in a result, pass its value in another request to retrieve more entries.
" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) to use to list tags.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"A list of tags for the specified ARN.
" + } + } + }, + "LocaleCode":{ + "type":"string", + "enum":[ + "de_DE", + "en_CA", + "en_US", + "en_UK", + "es_ES", + "fr_CA", + "fr_FR", + "id_ID", + "it_IT", + "ja_JP", + "ko_KR", + "pt_BR", + "tr_TR", + "zh_CN", + "zh_TW" + ] + }, + "ManagedRuleArn":{ + "type":"string", + "pattern":"arn:aws:events:[a-z-\\d]{2,25}:\\d{12}:rule\\/[a-zA-Z-\\d]{1,1024}" + }, + "ManagedRuleArns":{ + "type":"list", + "member":{"shape":"ManagedRuleArn"} + }, + "Media":{ + "type":"list", + "member":{"shape":"MediaElement"} + }, + "MediaElement":{ + "type":"structure", + "required":[ + "mediaId", + "type", + "url", + "caption" + ], + "members":{ + "mediaId":{ + "shape":"MediaId", + "documentation":"The unique ID for the media.
" + }, + "type":{ + "shape":"MediaElementType", + "documentation":"The type of media.
" + }, + "url":{ + "shape":"Url", + "documentation":"The url of the media.
" + }, + "caption":{ + "shape":"TextPartReference", + "documentation":"The caption of the media.
" + } + }, + "documentation":"Describes a media element.
" + }, + "MediaElementType":{ + "type":"string", + "enum":["IMAGE"] + }, + "MediaId":{ + "type":"string", + "max":256, + "min":1 + }, + "MessageComponents":{ + "type":"structure", + "members":{ + "headline":{ + "shape":"TextPartReference", + "documentation":"A sentence long summary. For example, titles or an email subject line.
" + }, + "paragraphSummary":{ + "shape":"TextPartReference", + "documentation":"A paragraph long or multiple sentence summary. For example, AWS Chatbot notifications.
" + }, + "completeDescription":{ + "shape":"TextPartReference", + "documentation":"A complete summary with all possible relevant information.
" + }, + "dimensions":{ + "shape":"Dimensions", + "documentation":"A list of properties in key-value pairs. Pairs are shown in order of importance from most important to least important. Channels may limit the number of dimensions shown to the notification viewer.
Included dimensions, keys, and values are subject to change."
+        }
+      },
+      "documentation":"Describes the components of a notification message.
" + }, + "MessageComponentsSummary":{ + "type":"structure", + "required":["headline"], + "members":{ + "headline":{ + "shape":"MessageComponentsSummaryHeadlineString", + "documentation":"A sentence long summary. For example, titles or an email subject line.
" + } + }, + "documentation":"Contains the headline message component.
" + }, + "MessageComponentsSummaryHeadlineString":{ + "type":"string", + "max":256, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[\\w+-/=]+" + }, + "NotificationConfigurationArn":{ + "type":"string", + "pattern":"arn:aws:notifications::[0-9]{12}:configuration/[a-z0-9]{27}" + }, + "NotificationConfigurationDescription":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[^\\u0001-\\u001F\\u007F-\\u009F]*" + }, + "NotificationConfigurationName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9_\\-]+" + }, + "NotificationConfigurationStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "PARTIALLY_ACTIVE", + "INACTIVE", + "DELETING" + ] + }, + "NotificationConfigurationStructure":{ + "type":"structure", + "required":[ + "arn", + "name", + "description", + "status", + "creationTime" + ], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) of the resource.
" + }, + "name":{ + "shape":"NotificationConfigurationName", + "documentation":"The name of the NotificationConfiguration. Supports RFC 3986's unreserved characters.
" + }, + "description":{ + "shape":"NotificationConfigurationDescription", + "documentation":"The description of the NotificationConfiguration.
" + }, + "status":{ + "shape":"NotificationConfigurationStatus", + "documentation":"The status of this NotificationConfiguration.
The status should always be INACTIVE when part of the CreateNotificationConfiguration response.
Values:
ACTIVE: All EventRules are ACTIVE and any call can be run.
PARTIALLY_ACTIVE: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run.
INACTIVE: All EventRules are INACTIVE and any call can be run.
DELETING: This NotificationConfiguration is being deleted. Only GET and LIST calls can be run."
+        },
+        "creationTime":{
+          "shape":"CreationTime",
+          "documentation":"The creation time of the resource.
" + }, + "aggregationDuration":{ + "shape":"AggregationDuration", + "documentation":"The aggregation preference of the NotificationConfiguration.
Values:
LONG: Aggregate notifications for long periods of time (12 hours).
SHORT: Aggregate notifications for short periods of time (5 minutes).
NONE: Don't aggregate notifications. No delay in delivery."
+        }
+      },
+      "documentation":"Contains the complete list of fields for a NotificationConfiguration.
" + }, + "NotificationConfigurations":{ + "type":"list", + "member":{"shape":"NotificationConfigurationStructure"} + }, + "NotificationEvent":{ + "type":"structure", + "required":[ + "schemaVersion", + "id", + "sourceEventMetadata", + "messageComponents", + "notificationType", + "textParts", + "media" + ], + "members":{ + "schemaVersion":{ + "shape":"SchemaVersion", + "documentation":"The schema version of the Notification Event.
" + }, + "id":{ + "shape":"NotificationEventId", + "documentation":"The unique identifier for a NotificationEvent.
" + }, + "sourceEventMetadata":{ + "shape":"SourceEventMetadata", + "documentation":"The source event metadata.
" + }, + "messageComponents":{"shape":"MessageComponents"}, + "sourceEventDetailUrl":{ + "shape":"Url", + "documentation":"The source event URL.
" + }, + "sourceEventDetailUrlDisplayText":{ + "shape":"String", + "documentation":"The detailed URL for the source event.
" + }, + "notificationType":{ + "shape":"NotificationType", + "documentation":"The type of event causing the notification.
Values:
ALERT: A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
WARNING: A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
ANNOUNCEMENT: A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
INFORMATIONAL: A notification about informational messages. For example, recommendations, service announcements, or reminders."
+        },
+        "eventStatus":{
+          "shape":"EventStatus",
+          "documentation":"The assessed nature of the event.
Values:
HEALTHY: All EventRules are ACTIVE and any call can be run.
UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run."
+        },
+        "aggregationEventType":{
+          "shape":"AggregationEventType",
+          "documentation":"The NotificationConfiguration's aggregation type.
Values:
AGGREGATE: The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
CHILD: The notification event is a child of an aggregate notification.
NONE: The notification isn't aggregated."
+        },
+        "aggregateNotificationEventArn":{
+          "shape":"NotificationEventArn",
+          "documentation":"If the value of aggregationEventType is not NONE, this is the Amazon Resource Name (ARN) of the parent aggregate notification. This is omitted if the notification isn't aggregated.
" + }, + "startTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The notification event start time.
" + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The end time of the event.
" + }, + "textParts":{ + "shape":"TextParts", + "documentation":"A list of text values.
" + }, + "media":{ + "shape":"Media", + "documentation":"A list of media elements.
" + } + }, + "documentation":"A NotificationEvent is a notification-focused representation of an event. They contain semantic information used by Channels to create end-user notifications.
" + }, + "NotificationEventArn":{ + "type":"string", + "pattern":"arn:[-.a-z0-9]{1,63}:notifications:[-.a-z0-9]{1,63}:[0-9]{12}:configuration/[a-z0-9]{27}/event/[a-z0-9]{27}" + }, + "NotificationEventId":{ + "type":"string", + "pattern":"[a-z0-9]{27}" + }, + "NotificationEventOverview":{ + "type":"structure", + "required":[ + "arn", + "notificationConfigurationArn", + "relatedAccount", + "creationTime", + "notificationEvent" + ], + "members":{ + "arn":{ + "shape":"NotificationEventArn", + "documentation":"The Amazon Resource Name (ARN) of the resource.
" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of the NotificationConfiguration.
" + }, + "relatedAccount":{ + "shape":"AccountId", + "documentation":"The account name containing the NotificationHub.
" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"The creation time of the NotificationEvent.
" + }, + "notificationEvent":{ + "shape":"NotificationEventSummary", + "documentation":"Refers to a NotificationEventSummary object.
Similar in structure to content in the GetNotificationEvent response."
+        },
+        "aggregationEventType":{
+          "shape":"AggregationEventType",
+          "documentation":"The NotificationConfiguration's aggregation type.
Values:
AGGREGATE: The notification event is an aggregate notification. Aggregate notifications summarize grouped events over a specified time period.
CHILD: The notification event is a child of an aggregate notification.
NONE: The notification isn't aggregated."
+        },
+        "aggregateNotificationEventArn":{
+          "shape":"NotificationEventArn",
+          "documentation":"The ARN of the aggregatedNotificationEventArn to match.
" + } + }, + "documentation":"Describes a short summary of a NotificationEvent. This is only used when listing notification events.
" + }, + "NotificationEventSummary":{ + "type":"structure", + "required":[ + "schemaVersion", + "sourceEventMetadata", + "messageComponents", + "eventStatus", + "notificationType" + ], + "members":{ + "schemaVersion":{ + "shape":"SchemaVersion", + "documentation":"The schema version of the Notification Event.
" + }, + "sourceEventMetadata":{ + "shape":"SourceEventMetadataSummary", + "documentation":"The source event metadata.
" + }, + "messageComponents":{ + "shape":"MessageComponentsSummary", + "documentation":"The message components of a notification event.
" + }, + "eventStatus":{ + "shape":"EventStatus", + "documentation":"The notification event status.
Values:
HEALTHY: All EventRules are ACTIVE and any call can be run.
UNHEALTHY: Some EventRules are ACTIVE and some are INACTIVE. Any call can be run."
+        },
+        "notificationType":{
+          "shape":"NotificationType",
+          "documentation":"The type of event causing the notification.
Values:
ALERT: A notification about an event where something was triggered, initiated, reopened, deployed, or a threshold was breached.
WARNING: A notification about an event where an issue is about to arise. For example, something is approaching a threshold.
ANNOUNCEMENT: A notification about an important event. For example, a step in a workflow or escalation path or that a workflow was updated.
INFORMATIONAL: A notification about informational messages. For example, recommendations, service announcements, or reminders."
+        }
+      },
+      "documentation":"Describes a short summary and metadata for a notification event.
" + }, + "NotificationEvents":{ + "type":"list", + "member":{"shape":"NotificationEventOverview"} + }, + "NotificationHubOverview":{ + "type":"structure", + "required":[ + "notificationHubRegion", + "statusSummary", + "creationTime" + ], + "members":{ + "notificationHubRegion":{ + "shape":"Region", + "documentation":"The Region of the resource.
" + }, + "statusSummary":{ + "shape":"NotificationHubStatusSummary", + "documentation":"The status summary of the resource.
" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"The date and time the resource was created.
" + }, + "lastActivationTime":{ + "shape":"LastActivationTime", + "documentation":"The most recent time this NotificationHub had an ACTIVE status.
" + } + }, + "documentation":"Describes an overview of a NotificationHub.
A NotificationHub is an account-level setting used to select the Regions where you want to store, process, and replicate your notifications.
" + }, + "NotificationHubStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "REGISTERING", + "DEREGISTERING", + "INACTIVE" + ] + }, + "NotificationHubStatusReason":{"type":"string"}, + "NotificationHubStatusSummary":{ + "type":"structure", + "required":[ + "status", + "reason" + ], + "members":{ + "status":{ + "shape":"NotificationHubStatus", + "documentation":"Status information about the NotificationHub.
Values:
ACTIVE: Incoming NotificationEvents are replicated to this NotificationHub.
REGISTERING: The NotificationHub is initializing. A NotificationHub with this status can't be deregistered.
DEREGISTERING: The NotificationHub is being deleted. You can't register additional NotificationHubs in the same Region as a NotificationHub with this status."
+        },
+        "reason":{
+          "shape":"NotificationHubStatusReason",
+          "documentation":"An explanation for the current status.
" + } + }, + "documentation":"NotificationHub status information.
" + }, + "NotificationHubs":{ + "type":"list", + "member":{"shape":"NotificationHubOverview"} + }, + "NotificationType":{ + "type":"string", + "enum":[ + "ALERT", + "WARNING", + "ANNOUNCEMENT", + "INFORMATIONAL" + ] + }, + "QuotaCode":{"type":"string"}, + "Region":{ + "type":"string", + "max":25, + "min":2, + "pattern":"([a-z]{1,2})-([a-z]{1,15}-)+([0-9])" + }, + "Regions":{ + "type":"list", + "member":{"shape":"Region"}, + "min":1 + }, + "RegisterNotificationHubRequest":{ + "type":"structure", + "required":["notificationHubRegion"], + "members":{ + "notificationHubRegion":{ + "shape":"Region", + "documentation":"The Region of the NotificationHub.
" + } + } + }, + "RegisterNotificationHubResponse":{ + "type":"structure", + "required":[ + "notificationHubRegion", + "statusSummary", + "creationTime" + ], + "members":{ + "notificationHubRegion":{ + "shape":"Region", + "documentation":"The Region of the NotificationHub.
" + }, + "statusSummary":{ + "shape":"NotificationHubStatusSummary", + "documentation":"NotificationHub status information.
" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"The date the resource was created.
" + }, + "lastActivationTime":{ + "shape":"LastActivationTime", + "documentation":"The date the resource was last activated.
" + } + } + }, + "Resource":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"The unique identifier for the resource.
At least one id or ARN is required.
" + }, + "arn":{ + "shape":"Arn", + "documentation":"The Amazon Resource Name (ARN) of the resource. At least one id or ARN is required.
" + }, + "detailUrl":{ + "shape":"Url", + "documentation":"The URL to the resource's detail page. If a detail page URL is unavailable, it is the URL to an informational page that describes the resource's type.
" + }, + "tags":{ + "shape":"Tags", + "documentation":"A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs.
" + } + }, + "documentation":"A resource affected by or closely linked to an event.
" + }, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The ID of the resource that wasn't found.
" + } + }, + "documentation":"Request references a resource which does not exist.
", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{"type":"string"}, + "Resources":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "SchemaVersion":{ + "type":"string", + "enum":["v1.0"] + }, + "ServiceCode":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceType" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceType":{ + "shape":"ResourceType", + "documentation":"The type of the resource that exceeds the service quota.
" + }, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The ID of the resource that exceeds the service quota.
" + }, + "serviceCode":{ + "shape":"ServiceCode", + "documentation":"The code for the service quota exceeded in Service Quotas.
" + }, + "quotaCode":{ + "shape":"QuotaCode", + "documentation":"The code for the service quota in Service Quotas.
" + } + }, + "documentation":"Request would cause a service quota to be exceeded.
", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "Source":{ + "type":"string", + "max":36, + "min":1, + "pattern":"aws.([a-z0-9\\-])+" + }, + "SourceEventMetadata":{ + "type":"structure", + "required":[ + "eventTypeVersion", + "sourceEventId", + "relatedAccount", + "source", + "eventOccurrenceTime", + "eventType", + "relatedResources" + ], + "members":{ + "eventTypeVersion":{ + "shape":"SourceEventMetadataEventTypeVersionString", + "documentation":"The version of the type of event.
" + }, + "sourceEventId":{ + "shape":"String", + "documentation":"The source event id.
" + }, + "eventOriginRegion":{ + "shape":"SourceEventMetadataEventOriginRegionString", + "documentation":"The Region the event originated from.
" + }, + "relatedAccount":{ + "shape":"SourceEventMetadataRelatedAccountString", + "documentation":"The Primary AWS account of Source Event
" + }, + "source":{ + "shape":"Source", + "documentation":"The AWS servvice the event originates from. For example aws.cloudwatch
.
The date and time the source event occurred. This is based on the Source Event.
" + }, + "eventType":{ + "shape":"SourceEventMetadataEventTypeString", + "documentation":"The type of event. For example, an AWS CloudWatch state change.
" + }, + "relatedResources":{ + "shape":"Resources", + "documentation":"A list of resources related to this NotificationEvent.
" + } + }, + "documentation":"Describes the metadata for a source event.
For more information, see Event structure reference in the Amazon EventBridge User Guide.
" + }, + "SourceEventMetadataEventOriginRegionString":{ + "type":"string", + "max":32, + "min":0, + "pattern":"([a-z]{1,2})-([a-z]{1,15}-)+([0-9])" + }, + "SourceEventMetadataEventTypeString":{ + "type":"string", + "max":256, + "min":1 + }, + "SourceEventMetadataEventTypeVersionString":{ + "type":"string", + "max":3, + "min":1, + "pattern":"[0-9.]+" + }, + "SourceEventMetadataRelatedAccountString":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "SourceEventMetadataSummary":{ + "type":"structure", + "required":[ + "source", + "eventType" + ], + "members":{ + "eventOriginRegion":{ + "shape":"SourceEventMetadataSummaryEventOriginRegionString", + "documentation":"The Region where the notification originated.
Unavailable for aggregated notifications.
" + }, + "source":{ + "shape":"String", + "documentation":"The matched event source.
Must match one of the valid EventBridge sources. Only AWS service sourced events are supported. For example, aws.ec2 and aws.cloudwatch. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide."
+        },
+        "eventType":{
+          "shape":"SourceEventMetadataSummaryEventTypeString",
+          "documentation":"The event type to match. Must match one of the valid Amazon EventBridge event types. For example, EC2 Instance State-change Notification and AWS CloudWatch Alarm State Change. For more information, see Event delivery from AWS services in the Amazon EventBridge User Guide.
" + } + }, + "documentation":"Contains metadata about the event that caused the NotificationEvent. For other specific values, see sourceEventMetadata.
" + }, + "SourceEventMetadataSummaryEventOriginRegionString":{ + "type":"string", + "max":32, + "min":0 + }, + "SourceEventMetadataSummaryEventTypeString":{ + "type":"string", + "max":256, + "min":1 + }, + "StatusSummaryByRegion":{ + "type":"map", + "key":{"shape":"Region"}, + "value":{"shape":"EventRuleStatusSummary"} + }, + "String":{"type":"string"}, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "TagKey":{ + "type":"string", + "pattern":"(?!aws:).{1,128}" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "arn", + "tags" + ], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) to use to tag a resource.
", + "location":"uri", + "locationName":"arn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs.
" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"list", + "member":{"shape":"TagsMemberString"}, + "max":50, + "min":0 + }, + "TagsMemberString":{ + "type":"string", + "max":256, + "min":1 + }, + "TextByLocale":{ + "type":"map", + "key":{"shape":"LocaleCode"}, + "value":{"shape":"String"} + }, + "TextPartId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[A-Za-z0-9_]+" + }, + "TextPartReference":{"type":"string"}, + "TextPartType":{ + "type":"string", + "enum":[ + "LOCALIZED_TEXT", + "PLAIN_TEXT", + "URL" + ] + }, + "TextPartValue":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"TextPartType", + "documentation":"The type of text part. Determines the usage of all other fields and whether or not they're required.
" + }, + "displayText":{ + "shape":"TextPartValueDisplayTextString", + "documentation":"A short single line description of the link. Must be hyperlinked with the URL itself.
Used for text parts with the type URL
.
A map of locales to the text in that locale.
" + }, + "url":{ + "shape":"Url", + "documentation":"The URL itself.
" + } + }, + "documentation":"Describes text information objects containing fields that determine how text part objects are composed.
" + }, + "TextPartValueDisplayTextString":{ + "type":"string", + "max":1024, + "min":1 + }, + "TextParts":{ + "type":"map", + "key":{"shape":"TextPartId"}, + "value":{"shape":"TextPartValue"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"}, + "serviceCode":{ + "shape":"ServiceCode", + "documentation":"Identifies the service being throttled.
" + }, + "quotaCode":{ + "shape":"QuotaCode", + "documentation":"Identifies the quota that is being throttled.
" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"The number of seconds a client should wait before retrying the request.
", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"Request was denied due to request throttling.
", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "arn", + "tagKeys" + ], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) to use to untag a resource.
", + "location":"uri", + "locationName":"arn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"The tag keys to use to untag a resource.
", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateEventRuleRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"EventRuleArn", + "documentation":"The Amazon Resource Name (ARN) to use to update the EventRule.
", + "location":"uri", + "locationName":"arn" + }, + "eventPattern":{ + "shape":"EventRuleEventPattern", + "documentation":"An additional event pattern used to further filter the events this EventRule receives.
For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide.
" + }, + "regions":{ + "shape":"Regions", + "documentation":"A list of AWS Regions that sends events to this EventRule.
" + } + } + }, + "UpdateEventRuleResponse":{ + "type":"structure", + "required":[ + "arn", + "notificationConfigurationArn", + "statusSummaryByRegion" + ], + "members":{ + "arn":{ + "shape":"EventRuleArn", + "documentation":"The Amazon Resource Name (ARN) to use to update the EventRule.
" + }, + "notificationConfigurationArn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The ARN of the NotificationConfiguration.
" + }, + "statusSummaryByRegion":{ + "shape":"StatusSummaryByRegion", + "documentation":"The status of the action by Region.
" + } + } + }, + "UpdateNotificationConfigurationRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"NotificationConfigurationArn", + "documentation":"The Amazon Resource Name (ARN) used to update the NotificationConfiguration.
", + "location":"uri", + "locationName":"arn" + }, + "name":{ + "shape":"NotificationConfigurationName", + "documentation":"The name of the NotificationConfiguration.
" + }, + "description":{ + "shape":"NotificationConfigurationDescription", + "documentation":"The description of the NotificationConfiguration.
" + }, + "aggregationDuration":{ + "shape":"AggregationDuration", + "documentation":"The status of this NotificationConfiguration.
The status should always be INACTIVE
when part of the CreateNotificationConfiguration response.
Values:
ACTIVE
All EventRules are ACTIVE
and any call can be run.
PARTIALLY_ACTIVE
Some EventRules are ACTIVE
and some are INACTIVE
. Any call can be run.
Any call can be run.
INACTIVE
All EventRules are INACTIVE
and any call can be run.
DELETING
This NotificationConfiguration is being deleted.
Only GET
and LIST
calls can be run.
The ARN used to update the NotificationConfiguration.
" + } + } + }, + "Url":{ + "type":"string", + "max":2000, + "min":0, + "pattern":"(https?)://.*" + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"The reason why your input is considered invalid.
" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"The list of input fields that are invalid.
" + } + }, + "documentation":"This exception is thrown when the notification event fails validation.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"The field name where the invalid entry was detected.
" + }, + "message":{ + "shape":"String", + "documentation":"A message with the reason for the validation exception error.
" + } + }, + "documentation":"Stores information about a field passed inside a request that resulted in an exception.
" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "fieldValidationFailed", + "other" + ] + } + }, + "documentation":"The AWS User Notifications API Reference provides descriptions, API request parameters, and the JSON response for each of the User Notification API actions.
User Notification control APIs are currently available in US East (Virginia) - us-east-1.
GetNotificationEvent and ListNotificationEvents APIs are currently available in commercial partition Regions and only return notifications stored in the same Region in which they're called.
The User Notifications console can only be used in US East (Virginia). Your data, however, is stored in each Region chosen as a notification hub in addition to US East (Virginia).
" +} diff --git a/botocore/data/notifications/2018-05-10/waiters-2.json b/botocore/data/notifications/2018-05-10/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/notifications/2018-05-10/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/notificationscontacts/2018-05-10/endpoint-rule-set-1.json b/botocore/data/notificationscontacts/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 0000000000..2234fb31a5 --- /dev/null +++ b/botocore/data/notificationscontacts/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,151 @@ +{ + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://notifications-contacts-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://notifications-contacts.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/notificationscontacts/2018-05-10/paginators-1.json b/botocore/data/notificationscontacts/2018-05-10/paginators-1.json new file mode 100644 index 0000000000..39f96a36e7 --- /dev/null +++ b/botocore/data/notificationscontacts/2018-05-10/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListEmailContacts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "emailContacts" + } + } +} 
diff --git a/botocore/data/notificationscontacts/2018-05-10/service-2.json b/botocore/data/notificationscontacts/2018-05-10/service-2.json new file mode 100644 index 0000000000..718819e063 --- /dev/null +++ b/botocore/data/notificationscontacts/2018-05-10/service-2.json @@ -0,0 +1,720 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"notifications-contacts", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"AWS User Notifications Contacts", + "serviceId":"NotificationsContacts", + "signatureVersion":"v4", + "signingName":"notifications-contacts", + "uid":"notificationscontacts-2018-05-10" + }, + "operations":{ + "ActivateEmailContact":{ + "name":"ActivateEmailContact", + "http":{ + "method":"PUT", + "requestUri":"/emailcontacts/{arn}/activate/{code}", + "responseCode":200 + }, + "input":{"shape":"ActivateEmailContactRequest"}, + "output":{"shape":"ActivateEmailContactResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Activates an email contact using an activation code. This code is in the activation email sent to the email address associated with this email contact.
", + "idempotent":true + }, + "CreateEmailContact":{ + "name":"CreateEmailContact", + "http":{ + "method":"POST", + "requestUri":"/2022-09-19/emailcontacts", + "responseCode":201 + }, + "input":{"shape":"CreateEmailContactRequest"}, + "output":{"shape":"CreateEmailContactResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates an email contact for the provided email address.
" + }, + "DeleteEmailContact":{ + "name":"DeleteEmailContact", + "http":{ + "method":"DELETE", + "requestUri":"/emailcontacts/{arn}", + "responseCode":200 + }, + "input":{"shape":"DeleteEmailContactRequest"}, + "output":{"shape":"DeleteEmailContactResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Deletes an email contact.
Deleting an email contact removes it from all associated notification configurations."
+    },
+    "GetEmailContact":{
+      "name":"GetEmailContact",
+      "http":{
+        "method":"GET",
+        "requestUri":"/emailcontacts/{arn}",
+        "responseCode":200
+      },
+      "input":{"shape":"GetEmailContactRequest"},
+      "output":{"shape":"GetEmailContactResponse"},
+      "documentation":"Returns an email contact.
" + }, + "ListEmailContacts":{ + "name":"ListEmailContacts", + "http":{ + "method":"GET", + "requestUri":"/emailcontacts", + "responseCode":200 + }, + "input":{"shape":"ListEmailContactsRequest"}, + "output":{"shape":"ListEmailContactsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Lists all email contacts created under the Account.
" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{arn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The resource can be a user, server, or role.
" + }, + "SendActivationCode":{ + "name":"SendActivationCode", + "http":{ + "method":"POST", + "requestUri":"/2022-10-31/emailcontacts/{arn}/activate/send", + "responseCode":200 + }, + "input":{"shape":"SendActivationCodeRequest"}, + "output":{"shape":"SendActivationCodeResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Sends an activation email to the email address associated with the specified email contact.
It might take a few minutes for the activation email to arrive. If it doesn't arrive, check your spam folder or try sending another activation email."
+    },
+    "TagResource":{
+      "name":"TagResource",
+      "http":{
+        "method":"POST",
+        "requestUri":"/tags/{arn}",
+        "responseCode":200
+      },
+      "input":{"shape":"TagResourceRequest"},
+      "output":{"shape":"TagResourceResponse"},
+      "documentation":"Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Taggable resources in AWS User Notifications Contacts include email contacts.
", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{arn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN). Taggable resources in AWS User Notifications Contacts include email contacts..
", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"You do not have sufficient access to perform this action.
", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "ActivateEmailContactRequest":{ + "type":"structure", + "required":[ + "arn", + "code" + ], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The Amazon Resource Name (ARN) of the resource.
", + "location":"uri", + "locationName":"arn" + }, + "code":{ + "shape":"Token", + "documentation":"The activation code for this email contact.
An email contact has a maximum of five activation attempts. Activation codes expire after 12 hours and are generated by the SendActivationCode API action.
", + "location":"uri", + "locationName":"code" + } + } + }, + "ActivateEmailContactResponse":{ + "type":"structure", + "members":{ + } + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The resource ID that prompted the conflict error.
" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"The resource type that prompted the conflict error.
" + } + }, + "documentation":"Updating or deleting a resource can cause an inconsistent state.
", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateEmailContactRequest":{ + "type":"structure", + "required":[ + "name", + "emailAddress" + ], + "members":{ + "name":{ + "shape":"EmailContactName", + "documentation":"The name of the email contact.
" + }, + "emailAddress":{ + "shape":"EmailContactAddress", + "documentation":"The email address this email contact points to. The activation email and any subscribed emails are sent here.
This email address can't receive emails until it's activated."
+        },
+        "tags":{
+          "shape":"TagMap",
+          "documentation":"A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs.
" + } + } + }, + "CreateEmailContactResponse":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The Amazon Resource Name (ARN) of the resource.
" + } + } + }, + "CreationTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "DeleteEmailContactRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The Amazon Resource Name (ARN) of the resource.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "DeleteEmailContactResponse":{ + "type":"structure", + "members":{ + } + }, + "EmailContact":{ + "type":"structure", + "required":[ + "arn", + "name", + "address", + "status", + "creationTime", + "updateTime" + ], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The Amazon Resource Name (ARN) of the email contact.
" + }, + "name":{ + "shape":"EmailContactName", + "documentation":"The name of the email contact.
" + }, + "address":{ + "shape":"SensitiveEmailContactAddress", + "documentation":"The email address this email contact points to. The activation email and any subscribed emails are sent here.
" + }, + "status":{ + "shape":"EmailContactStatus", + "documentation":"The status of the email contact. Only activated email contacts receive emails.
" + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"The creation time of the resource.
" + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"The time the resource was last updated.
" + } + }, + "documentation":"An email contact.
" + }, + "EmailContactAddress":{ + "type":"string", + "max":254, + "min":6, + "pattern":"(.+)@(.+)" + }, + "EmailContactArn":{ + "type":"string", + "pattern":"arn:aws:notifications-contacts::[0-9]{12}:emailcontact/[a-z0-9]{27}" + }, + "EmailContactName":{ + "type":"string", + "max":64, + "min":1, + "pattern":".*[\\w-.~]+.*", + "sensitive":true + }, + "EmailContactStatus":{ + "type":"string", + "enum":[ + "inactive", + "active" + ] + }, + "EmailContacts":{ + "type":"list", + "member":{"shape":"EmailContact"} + }, + "ErrorMessage":{"type":"string"}, + "GetEmailContactRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The Amazon Resource Name (ARN) of the email contact to get.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "GetEmailContactResponse":{ + "type":"structure", + "required":["emailContact"], + "members":{ + "emailContact":{ + "shape":"EmailContact", + "documentation":"The email contact for the provided email address.
" + } + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"Unexpected error during processing of request.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListEmailContactsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListEmailContactsRequestMaxResultsInteger", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.
", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListEmailContactsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListEmailContactsResponse":{ + "type":"structure", + "required":["emailContacts"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.
" + }, + "emailContacts":{ + "shape":"EmailContacts", + "documentation":"A list of email contacts.
" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The ARN you specified to list the tags of.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"Key-value pairs that are assigned to a resource, usually for the purpose of grouping and searching for items. Tags are metadata that you define.
" + } + } + }, + "QuotaCode":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The ID of the resource that wasn't found.
" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"The type of resource that wasn't found.
" + } + }, + "documentation":"Your request references a resource which does not exist.
", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{"type":"string"}, + "SendActivationCodeRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The Amazon Resource Name (ARN) of the resource.
", + "location":"uri", + "locationName":"arn" + } + } + }, + "SendActivationCodeResponse":{ + "type":"structure", + "members":{ + } + }, + "SensitiveEmailContactAddress":{ + "type":"string", + "max":254, + "min":6, + "pattern":"(.+)@(.+)", + "sensitive":true + }, + "ServiceCode":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The ID of the resource that exceeds the service quota.
" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"The type of the resource that exceeds the service quota.
" + }, + "serviceCode":{ + "shape":"ServiceCode", + "documentation":"The code for the service quota exceeded in Service Quotas.
" + }, + "quotaCode":{ + "shape":"QuotaCode", + "documentation":"The code for the service quota in Service Quotas.
" + } + }, + "documentation":"Request would cause a service quota to be exceeded.
", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "pattern":"(?!aws:).{1,128}" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "documentation":"Map of tags assigned to a resource
", + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "arn", + "tags" + ], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The ARN of the configuration.
", + "location":"uri", + "locationName":"arn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"A list of tags to apply to the configuration.
" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"ErrorMessage"}, + "serviceCode":{ + "shape":"ServiceCode", + "documentation":"Identifies the service being throttled.
" + }, + "quotaCode":{ + "shape":"QuotaCode", + "documentation":"Identifies the quota that is being throttled.
" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"The number of seconds a client should wait before retrying the request.
", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"The request was denied due to request throttling.
", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Token":{ + "type":"string", + "max":7, + "min":7, + "pattern":"[a-z0-9]{7}", + "sensitive":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "arn", + "tagKeys" + ], + "members":{ + "arn":{ + "shape":"EmailContactArn", + "documentation":"The value of the resource that will have the tag removed. An Amazon Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user, or role.
", + "location":"uri", + "locationName":"arn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"Specifies a list of tag keys that you want to remove from the specified resources.
", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{"shape":"ErrorMessage"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"The reason why your input is considered invalid.
" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"The list of input fields that are invalid.
" + } + }, + "documentation":"The input fails to satisfy the constraints specified by an AWS service.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"The field name where the invalid entry was detected.
" + }, + "message":{ + "shape":"String", + "documentation":"A message with the reason for the validation exception error.
" + } + }, + "documentation":"Stores information about a field passed inside a request that resulted in an exception.
" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "fieldValidationFailed", + "other" + ] + } + }, + "documentation":"AWS User Notifications Contacts is a service that allows you to create and manage email contacts for AWS User Notifications. The AWS User Notifications Contacts API Reference provides descriptions, API request parameters, and the JSON response for all email contact related API actions.
" +} diff --git a/botocore/data/notificationscontacts/2018-05-10/waiters-2.json b/botocore/data/notificationscontacts/2018-05-10/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/notificationscontacts/2018-05-10/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/resiliencehub/2020-04-30/paginators-1.json b/botocore/data/resiliencehub/2020-04-30/paginators-1.json index f98356da47..665877a81d 100644 --- a/botocore/data/resiliencehub/2020-04-30/paginators-1.json +++ b/botocore/data/resiliencehub/2020-04-30/paginators-1.json @@ -11,6 +11,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "groupingRecommendations" + }, + "ListMetrics": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "rows" } } } diff --git a/botocore/data/resiliencehub/2020-04-30/service-2.json b/botocore/data/resiliencehub/2020-04-30/service-2.json index 8c109c1c5f..6a8aaa3029 100644 --- a/botocore/data/resiliencehub/2020-04-30/service-2.json +++ b/botocore/data/resiliencehub/2020-04-30/service-2.json @@ -446,6 +446,24 @@ ], "documentation":"Describes the status of importing resources to an application version.
If you get a 404 error with ResourceImportStatusNotFoundAppMetadataException, you must call importResourcesToDraftAppVersion after creating the application and before calling describeDraftAppVersionResourcesImportStatus to obtain the status."
+    },
+    "DescribeMetricsExport":{
+      "name":"DescribeMetricsExport",
+      "http":{
+        "method":"POST",
+        "requestUri":"/describe-metrics-export",
+        "responseCode":200
+      },
+      "input":{"shape":"DescribeMetricsExportRequest"},
+      "output":{"shape":"DescribeMetricsExportResponse"},
+      "documentation":"Describes the metrics of the application configuration being exported.
" + }, "DescribeResiliencyPolicy":{ "name":"DescribeResiliencyPolicy", "http":{ @@ -535,7 +553,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Indicates the list of compliance drifts that were detected while running an assessment.
" + "documentation":"List of compliance drifts that were detected while running an assessment.
" }, "ListAppAssessmentResourceDrifts":{ "name":"ListAppAssessmentResourceDrifts", @@ -552,7 +570,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Indicates the list of resource drifts that were detected while running an assessment.
" + "documentation":"List of resource drifts that were detected while running an assessment.
" }, "ListAppAssessments":{ "name":"ListAppAssessments", @@ -716,6 +734,23 @@ ], "documentation":"Lists your Resilience Hub applications.
You can filter applications using only one filter at a time or without using any filter. If you try to filter applications using multiple filters, you will get the following error:
An error occurred (ValidationException) when calling the ListApps operation: Only one filter is supported for this operation.
Lists the metrics that can be exported.
" + }, "ListRecommendationTemplates":{ "name":"ListRecommendationTemplates", "http":{ @@ -976,6 +1011,25 @@ ], "documentation":"Creates a new application assessment for an application.
" }, + "StartMetricsExport":{ + "name":"StartMetricsExport", + "http":{ + "method":"POST", + "requestUri":"/start-metrics-export", + "responseCode":200 + }, + "input":{"shape":"StartMetricsExportRequest"}, + "output":{"shape":"StartMetricsExportResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Initiates the export task of metrics.
" + }, "StartResourceGroupingRecommendationTask":{ "name":"StartResourceGroupingRecommendationTask", "http":{ @@ -1159,7 +1213,7 @@ }, "entries":{ "shape":"AcceptGroupingRecommendationEntries", - "documentation":"Indicates the list of resource grouping recommendations you want to include in your application.
" + "documentation":"List of resource grouping recommendations you want to include in your application.
" } } }, @@ -1176,7 +1230,7 @@ }, "failedEntries":{ "shape":"FailedGroupingRecommendationEntries", - "documentation":"Indicates the list of resource grouping recommendations that could not be included in your application.
" + "documentation":"List of resource grouping recommendations that could not be included in your application.
" } } }, @@ -1342,7 +1396,7 @@ }, "creationTime":{ "shape":"TimeStamp", - "documentation":"Date and time when the app was created.
" + "documentation":"Date and time when the application was created.
" }, "description":{ "shape":"EntityDescription", @@ -1971,7 +2025,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[A-za-z0-9_.-]{0,63}$" + "pattern":"^[A-Za-z0-9_.-]{0,63}$" }, "ComplianceDrift":{ "type":"structure", @@ -2063,6 +2117,45 @@ "type":"list", "member":{"shape":"ComponentRecommendation"} }, + "Condition":{ + "type":"structure", + "required":[ + "field", + "operator" + ], + "members":{ + "field":{ + "shape":"String255", + "documentation":"Indicates the field in the metric.
" + }, + "operator":{ + "shape":"ConditionOperatorType", + "documentation":"Indicates the type of operator or comparison to be used when evaluating a condition against the specified field.
" + }, + "value":{ + "shape":"String255", + "documentation":"Indicates the value or data against which a condition is evaluated.
" + } + }, + "documentation":"Indicates the condition based on which you want to filter the metrics.
" + }, + "ConditionList":{ + "type":"list", + "member":{"shape":"Condition"}, + "max":50, + "min":0 + }, + "ConditionOperatorType":{ + "type":"string", + "enum":[ + "Equals", + "NotEquals", + "GreaterThen", + "GreaterOrEquals", + "LessThen", + "LessOrEquals" + ] + }, "ConfigRecommendation":{ "type":"structure", "required":[ @@ -2438,11 +2531,11 @@ }, "policyDescription":{ "shape":"EntityDescription", - "documentation":"The description for the policy.
" + "documentation":"Description of the resiliency policy.
" }, "policyName":{ "shape":"EntityName", - "documentation":"The name of the policy
" + "documentation":"Name of the resiliency policy.
" }, "tags":{ "shape":"TagMap", @@ -3053,6 +3146,41 @@ } } }, + "DescribeMetricsExportRequest":{ + "type":"structure", + "required":["metricsExportId"], + "members":{ + "metricsExportId":{ + "shape":"String255", + "documentation":"Identifier of the metrics export task.
" + } + } + }, + "DescribeMetricsExportResponse":{ + "type":"structure", + "required":[ + "metricsExportId", + "status" + ], + "members":{ + "errorMessage":{ + "shape":"String500", + "documentation":"Explains the error that occurred while exporting the metrics.
" + }, + "exportLocation":{ + "shape":"S3Location", + "documentation":"Specifies the name of the Amazon S3 bucket where the exported metrics is stored.
" + }, + "metricsExportId":{ + "shape":"String255", + "documentation":"Identifier for the metrics export task.
" + }, + "status":{ + "shape":"MetricsExportStatusType", + "documentation":"Indicates the status of the metrics export task.
" + } + } + }, "DescribeResiliencyPolicyRequest":{ "type":"structure", "required":["policyArn"], @@ -3083,7 +3211,7 @@ }, "groupingId":{ "shape":"String255", - "documentation":"Indicates the identifier of the grouping recommendation task.
" + "documentation":"Identifier of the grouping recommendation task.
" } } }, @@ -3096,11 +3224,11 @@ "members":{ "errorMessage":{ "shape":"String500", - "documentation":"Indicates the error that occurred while generating a grouping recommendation.
" + "documentation":"Error that occurred while generating a grouping recommendation.
" }, "groupingId":{ "shape":"String255", - "documentation":"Indicates the identifier of the grouping recommendation task.
" + "documentation":"Identifier of the grouping recommendation task.
" }, "status":{ "shape":"ResourcesGroupingRecGenStatusType", @@ -3389,6 +3517,37 @@ }, "documentation":"Defines a failure policy.
" }, + "Field":{ + "type":"structure", + "required":["name"], + "members":{ + "aggregation":{ + "shape":"FieldAggregationType", + "documentation":"(Optional) Indicates the type of aggregation or summary operation (such as Sum, Average, and so on) to be performed on a particular field or set of data.
" + }, + "name":{ + "shape":"String255", + "documentation":"Name of the field.
" + } + }, + "documentation":"Indicates the field or attribute of a resource or data structure on which a condition is being applied or evaluated.
" + }, + "FieldAggregationType":{ + "type":"string", + "enum":[ + "Min", + "Max", + "Sum", + "Avg", + "Count" + ] + }, + "FieldList":{ + "type":"list", + "member":{"shape":"Field"}, + "max":50, + "min":0 + }, "GroupingAppComponent":{ "type":"structure", "required":[ @@ -3670,7 +3829,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"Indicates the maximum number of compliance drifts requested.
" + "documentation":"Maximum number of compliance drifts requested.
" }, "nextToken":{ "shape":"NextToken", @@ -3702,7 +3861,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"Indicates the maximum number of drift results to include in the response. If more results exist than the specified MaxResults
value, a token is included in the response so that the remaining results can be retrieved.
" + "documentation":"Maximum number of drift results to include in the response. If more results exist than the specified MaxResults
value, a token is included in the response so that the remaining results can be retrieved.
" }, "fromLastAssessmentTime":{ "shape":"TimeStamp", - "documentation":"Indicates the lower limit of the range that is used to filter applications based on their last assessment times.
", + "documentation":"Lower limit of the range that is used to filter applications based on their last assessment times.
", "location":"querystring", "locationName":"fromLastAssessmentTime" }, @@ -4120,7 +4279,7 @@ }, "toLastAssessmentTime":{ "shape":"TimeStamp", - "documentation":"Indicates the upper limit of the range that is used to filter the applications based on their last assessment times.
", + "documentation":"Upper limit of the range that is used to filter the applications based on their last assessment times.
", "location":"querystring", "locationName":"toLastAssessmentTime" } @@ -4140,6 +4299,49 @@ } } }, + "ListMetricsRequest":{ + "type":"structure", + "members":{ + "conditions":{ + "shape":"ConditionList", + "documentation":"Indicates the list of all the conditions that were applied on the metrics.
" + }, + "dataSource":{ + "shape":"String255", + "documentation":"Indicates the data source of the metrics.
" + }, + "fields":{ + "shape":"FieldList", + "documentation":"Indicates the list of fields in the data source.
" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"Maximum number of results to include in the response. If more results exist than the specified MaxResults
value, a token is included in the response so that the remaining results can be retrieved.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"Null, or the token from a previous call to get the next set of results.
" + }, + "sorts":{ + "shape":"SortList", + "documentation":"(Optional) Indicates the order in which you want to sort the fields in the metrics. By default, the fields are sorted in the ascending order.
" + } + } + }, + "ListMetricsResponse":{ + "type":"structure", + "required":["rows"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"Token for the next set of results, or null if there are no more results.
" + }, + "rows":{ + "shape":"RowList", + "documentation":"Specifies all the list of metric values for each row of metrics.
" + } + } + }, "ListRecommendationTemplatesRequest":{ "type":"structure", "members":{ @@ -4217,7 +4419,7 @@ }, "policyName":{ "shape":"EntityName", - "documentation":"The name of the policy
", + "documentation":"Name of the resiliency policy.
", "location":"querystring", "locationName":"policyName" } @@ -4478,6 +4680,15 @@ "max":100, "min":1 }, + "MetricsExportStatusType":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Failed", + "Success" + ] + }, "NextToken":{ "type":"string", "pattern":"^\\S{1,2000}$" @@ -4871,7 +5082,7 @@ }, "entries":{ "shape":"RejectGroupingRecommendationEntries", - "documentation":"Indicates the list of resource grouping recommendations you have selected to exclude from your application.
" + "documentation":"List of resource grouping recommendations you have selected to exclude from your application.
" } } }, @@ -4888,7 +5099,7 @@ }, "failedEntries":{ "shape":"FailedGroupingRecommendationEntries", - "documentation":"Indicates the list of resource grouping recommendations that failed to get excluded in your application.
" + "documentation":"List of resource grouping recommendations that failed to get excluded in your application.
" } } }, @@ -4982,7 +5193,7 @@ }, "policyDescription":{ "shape":"EntityDescription", - "documentation":"The description for the policy.
" + "documentation":"Description of the resiliency policy.
" }, "policyName":{ "shape":"EntityName", @@ -5295,6 +5506,14 @@ "type":"integer", "box":true }, + "Row":{ + "type":"list", + "member":{"shape":"String255"} + }, + "RowList":{ + "type":"list", + "member":{"shape":"Row"} + }, "S3Location":{ "type":"structure", "members":{ @@ -5413,6 +5632,27 @@ "type":"string", "enum":["SSM"] }, + "Sort":{ + "type":"structure", + "required":["field"], + "members":{ + "ascending":{ + "shape":"BooleanOptional", + "documentation":"Indicates the name or identifier of the field or attribute that should be used as the basis for sorting the metrics.
" + }, + "field":{ + "shape":"String255", + "documentation":"Indicates the order in which you want to sort the metrics. By default, the list is sorted in ascending order. To sort the list in descending order, set this field to False.
" + } + }, + "documentation":"Indicates the sorting order of the fields in the metrics.
" + }, + "SortList":{ + "type":"list", + "member":{"shape":"Sort"}, + "max":50, + "min":0 + }, "SpecReferenceId":{ "type":"string", "max":500, @@ -5459,6 +5699,37 @@ } } }, + "StartMetricsExportRequest":{ + "type":"structure", + "members":{ + "bucketName":{ + "shape":"EntityName", + "documentation":"(Optional) Specifies the name of the Amazon Simple Storage Service bucket where the exported metrics will be stored.
" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"Used for an idempotency token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. You should not reuse the same client token for other API requests.
", + "idempotencyToken":true + } + } + }, + "StartMetricsExportResponse":{ + "type":"structure", + "required":[ + "metricsExportId", + "status" + ], + "members":{ + "metricsExportId":{ + "shape":"String255", + "documentation":"Identifier of the metrics export task.
" + }, + "status":{ + "shape":"MetricsExportStatusType", + "documentation":"Indicates the status of the metrics export task.
" + } + } + }, "StartResourceGroupingRecommendationTaskRequest":{ "type":"structure", "required":["appArn"], @@ -5483,11 +5754,11 @@ }, "errorMessage":{ "shape":"String500", - "documentation":"Indicates the error that occurred while executing a grouping recommendation task.
" + "documentation":"Error that occurred while executing a grouping recommendation task.
" }, "groupingId":{ "shape":"String255", - "documentation":"Indicates the identifier of the grouping recommendation task.
" + "documentation":"Identifier of the grouping recommendation task.
" }, "status":{ "shape":"ResourcesGroupingRecGenStatusType", @@ -6011,7 +6282,7 @@ }, "policy":{ "shape":"DisruptionPolicy", - "documentation":"The type of resiliency policy to be created, including the recovery time objective (RTO) and recovery point objective (RPO) in seconds.
" + "documentation":"Resiliency policy to be created, including the recovery time objective (RTO) and recovery point objective (RPO) in seconds.
" }, "policyArn":{ "shape":"Arn", @@ -6019,11 +6290,11 @@ }, "policyDescription":{ "shape":"EntityDescription", - "documentation":"The description for the policy.
" + "documentation":"Description of the resiliency policy.
" }, "policyName":{ "shape":"EntityName", - "documentation":"The name of the policy
" + "documentation":"Name of the resiliency policy.
" }, "tier":{ "shape":"ResiliencyPolicyTier", @@ -6037,7 +6308,7 @@ "members":{ "policy":{ "shape":"ResiliencyPolicy", - "documentation":"The type of resiliency policy that was updated, including the recovery time objective (RTO) and recovery point objective (RPO) in seconds.
" + "documentation":"The resiliency policy that was updated, including the recovery time objective (RTO) and recovery point objective (RPO) in seconds.
" } } }, diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 63ad5c46e0..aaf68b83fe 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -129,7 +129,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketAnalyticsConfigurationRequest"}, - "documentation":"This operation is not supported by directory buckets.
Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to DeleteBucketAnalyticsConfiguration
:
This operation is not supported for directory buckets.
Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to DeleteBucketAnalyticsConfiguration
:
This operation is not supported by directory buckets.
Deletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS
action. The bucket owner has this permission by default and can grant this permission to others.
For information about cors
, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.
Related Resources
", + "documentation":"This operation is not supported for directory buckets.
Deletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS
action. The bucket owner has this permission by default and can grant this permission to others.
For information about cors
, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.
Related Resources
", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -169,7 +169,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketIntelligentTieringConfigurationRequest"}, - "documentation":"This operation is not supported by directory buckets.
Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to DeleteBucketIntelligentTieringConfiguration
include:
This operation is not supported for directory buckets.
Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to DeleteBucketIntelligentTieringConfiguration
include:
This operation is not supported by directory buckets.
Deletes an inventory configuration (identified by the inventory ID) from the bucket.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
Operations related to DeleteBucketInventoryConfiguration
include:
This operation is not supported for directory buckets.
Deletes an inventory configuration (identified by the inventory ID) from the bucket.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
Operations related to DeleteBucketInventoryConfiguration
include:
This operation is not supported by directory buckets.
Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration
action. By default, the bucket owner has this permission and the bucket owner can grant this permission to others.
There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 systems.
For more information about the object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", + "documentation":"Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
For more information about the object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -209,7 +209,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketMetricsConfigurationRequest"}, - "documentation":"This operation is not supported by directory buckets.
Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to DeleteBucketMetricsConfiguration
:
This operation is not supported for directory buckets.
Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to DeleteBucketMetricsConfiguration
:
This operation is not supported by directory buckets.
Removes OwnershipControls
for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For information about Amazon S3 Object Ownership, see Using Object Ownership.
The following operations are related to DeleteBucketOwnershipControls
:
This operation is not supported for directory buckets.
Removes OwnershipControls
for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For information about Amazon S3 Object Ownership, see Using Object Ownership.
The following operations are related to DeleteBucketOwnershipControls
:
This operation is not supported by directory buckets.
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully propagate.
For information about replication configuration, see Replication in the Amazon S3 User Guide.
The following operations are related to DeleteBucketReplication
:
This operation is not supported for directory buckets.
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully propagate.
For information about replication configuration, see Replication in the Amazon S3 User Guide.
The following operations are related to DeleteBucketReplication
:
This operation is not supported by directory buckets.
Deletes the tags from the bucket.
To use this operation, you must have permission to perform the s3:PutBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
The following operations are related to DeleteBucketTagging
:
This operation is not supported for directory buckets.
Deletes the tags from the bucket.
To use this operation, you must have permission to perform the s3:PutBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
The following operations are related to DeleteBucketTagging
:
This operation is not supported by directory buckets.
This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK
response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK
response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
response if the bucket specified in the request does not exist.
This DELETE action requires the S3:DeleteBucketWebsite
permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite
:
This operation is not supported for directory buckets.
This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK
response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK
response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
response if the bucket specified in the request does not exist.
This DELETE action requires the S3:DeleteBucketWebsite
permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite
:
This operation is not supported by directory buckets.
Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.
To use this operation, you must have permission to perform the s3:DeleteObjectTagging
action.
To delete tags of a specific object version, add the versionId
query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging
action.
The following operations are related to DeleteObjectTagging
:
This operation is not supported for directory buckets.
Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.
To use this operation, you must have permission to perform the s3:DeleteObjectTagging
action.
To delete tags of a specific object version, add the versionId
query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging
action.
The following operations are related to DeleteObjectTagging
:
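
As the documentation above notes, adding a version id targets a specific object version and requires the s3:DeleteObjectVersionTagging permission. A minimal boto3 sketch with placeholder bucket, key, and version id:

import boto3

s3 = boto3.client("s3")

# Remove the tag set from the current version of an object.
s3.delete_object_tagging(Bucket="amzn-s3-demo-bucket", Key="example-object.txt")

# Remove the tag set from one specific version.
s3.delete_object_tagging(
    Bucket="amzn-s3-demo-bucket",
    Key="example-object.txt",
    VersionId="example-version-id",
)
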
This operation is not supported by directory buckets.
Removes the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to DeletePublicAccessBlock
:
This operation is not supported for directory buckets.
Removes the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to DeletePublicAccessBlock
:
This operation is not supported by directory buckets.
This implementation of the GET action uses the accelerate
subresource to return the Transfer Acceleration state of a bucket, which is either Enabled
or Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon S3 User Guide.
The following operations are related to GetBucketAccelerateConfiguration
:
This operation is not supported for directory buckets.
This implementation of the GET action uses the accelerate
subresource to return the Transfer Acceleration state of a bucket, which is either Enabled
or Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon S3 User Guide.
The following operations are related to GetBucketAccelerateConfiguration
:
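
As described above, a bucket that has never had Transfer Acceleration configured returns no state at all, so the Status key can be absent. A minimal boto3 sketch with a placeholder bucket name:

import boto3

s3 = boto3.client("s3")
resp = s3.get_bucket_accelerate_configuration(Bucket="amzn-s3-demo-bucket")
# "Enabled" or "Suspended" when a state has been set; absent otherwise.
print(resp.get("Status", "no acceleration state set"))
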
This operation is not supported by directory buckets.
This implementation of the GET
action uses the acl
subresource to return the access control list (ACL) of a bucket. To use GET
to return the ACL of the bucket, you must have the READ_ACP
access to the bucket. If READ_ACP
permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control
ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
The following operations are related to GetBucketAcl
:
This operation is not supported for directory buckets.
This implementation of the GET
action uses the acl
subresource to return the access control list (ACL) of a bucket. To use GET
to return the ACL of the bucket, you must have the READ_ACP
access to the bucket. If READ_ACP
permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control
ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
The following operations are related to GetBucketAcl
:
This operation is not supported by directory buckets.
This implementation of the GET action returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon S3 User Guide.
The following operations are related to GetBucketAnalyticsConfiguration
:
This operation is not supported for directory buckets.
This implementation of the GET action returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon S3 User Guide.
The following operations are related to GetBucketAnalyticsConfiguration
:
This operation is not supported by directory buckets.
Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:GetBucketCORS
action. By default, the bucket owner has this permission and can grant it to others.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
For more information about CORS, see Enabling Cross-Origin Resource Sharing.
The following operations are related to GetBucketCors
:
This operation is not supported for directory buckets.
Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:GetBucketCORS
action. By default, the bucket owner has this permission and can grant it to others.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
For more information about CORS, see Enabling Cross-Origin Resource Sharing.
The following operations are related to GetBucketCors
:
This operation is not supported by directory buckets.
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to GetBucketIntelligentTieringConfiguration
include:
This operation is not supported for directory buckets.
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to GetBucketIntelligentTieringConfiguration
include:
This operation is not supported by directory buckets.
Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to GetBucketInventoryConfiguration
:
This operation is not supported for directory buckets.
Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to GetBucketInventoryConfiguration
:
For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter
element, you should see the updated version of this topic. This topic is provided for backward compatibility.
This operation is not supported by directory buckets.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycle
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycle
:
For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter
element, you should see the updated version of this topic. This topic is provided for backward compatibility.
This operation is not supported for directory buckets.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycle
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycle
:
This operation is not supported by directory buckets.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see GetBucketLifecycle. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier action,
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API, which is compatible with the new functionality. The previous version of the API supported filtering based only on an object key name prefix, which is supported for general purpose buckets for backward compatibility. For the related API description, see GetBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiring of versioned objects, transitions, and tag filters are not supported.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:GetLifecycleConfiguration
permission.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:GetLifecycleConfiguration
permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com
.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
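
A minimal boto3 sketch of the read path described above, including the NoSuchLifecycleConfiguration special error; the bucket name is a placeholder, and for a directory bucket the SDK routes this call to the s3express-control Regional endpoint per the documentation:

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3")

try:
    config = s3.get_bucket_lifecycle_configuration(Bucket="amzn-s3-demo-bucket")
    for rule in config.get("Rules", []):
        print(rule.get("ID"), rule.get("Status"))
except ClientError as err:
    if err.response["Error"]["Code"] == "NoSuchLifecycleConfiguration":
        print("no lifecycle configuration on this bucket")  # the 404 special error above
    else:
        raise
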
This operation is not supported by directory buckets.
Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint
request parameter in a CreateBucket
request. For more information, see CreateBucket.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
We recommend that you use HeadBucket to return the Region that a bucket resides in. For backward compatibility, Amazon S3 continues to support GetBucketLocation.
The following operations are related to GetBucketLocation
:
This operation is not supported for directory buckets.
Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint
request parameter in a CreateBucket
request. For more information, see CreateBucket.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
We recommend that you use HeadBucket to return the Region that a bucket resides in. For backward compatibility, Amazon S3 continues to support GetBucketLocation.
The following operations are related to GetBucketLocation
:
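
Following the recommendation above to prefer HeadBucket for discovering a bucket's Region, a minimal boto3 sketch that reads the x-amz-bucket-region response header; the bucket name is a placeholder:

import boto3

s3 = boto3.client("s3")
resp = s3.head_bucket(Bucket="amzn-s3-demo-bucket")
# The Region comes back in the x-amz-bucket-region response header.
print(resp["ResponseMetadata"]["HTTPHeaders"]["x-amz-bucket-region"])
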
This operation is not supported by directory buckets.
Returns the logging status of a bucket and the permissions users have to view and modify that status.
The following operations are related to GetBucketLogging
:
This operation is not supported for directory buckets.
Returns the logging status of a bucket and the permissions users have to view and modify that status.
The following operations are related to GetBucketLogging
:
This operation is not supported by directory buckets.
Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to GetBucketMetricsConfiguration
:
This operation is not supported for directory buckets.
Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to GetBucketMetricsConfiguration
:
This operation is not supported by directory buckets.
No longer used, see GetBucketNotificationConfiguration.
", + "documentation":"This operation is not supported for directory buckets.
No longer used, see GetBucketNotificationConfiguration.
", "deprecated":true, "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -519,7 +519,7 @@ }, "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfiguration"}, - "documentation":"This operation is not supported by directory buckets.
Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the action returns an empty NotificationConfiguration
element.
By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification
permission.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
The following action is related to GetBucketNotification
:
This operation is not supported for directory buckets.
Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the action returns an empty NotificationConfiguration
element.
By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification
permission.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
The following action is related to GetBucketNotification
:
This operation is not supported by directory buckets.
Retrieves OwnershipControls
for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls
permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.
For information about Amazon S3 Object Ownership, see Using Object Ownership.
The following operations are related to GetBucketOwnershipControls
:
This operation is not supported for directory buckets.
Retrieves OwnershipControls
for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls
permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.
For information about Amazon S3 Object Ownership, see Using Object Ownership.
The following operations are related to GetBucketOwnershipControls
:
This operation is not supported by directory buckets.
Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".
The following operations are related to GetBucketPolicyStatus
:
This operation is not supported for directory buckets.
Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".
The following operations are related to GetBucketPolicyStatus
:
This operation is not supported by directory buckets.
Returns the replication configuration of a bucket.
It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.
For information about replication configuration, see Replication in the Amazon S3 User Guide.
This action requires permissions for the s3:GetReplicationConfiguration
action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter
element in a replication configuration, you must also include the DeleteMarkerReplication
and Priority
elements. The response also returns those elements.
For information about GetBucketReplication
errors, see List of replication-related error codes
The following operations are related to GetBucketReplication
:
This operation is not supported for directory buckets.
Returns the replication configuration of a bucket.
It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.
For information about replication configuration, see Replication in the Amazon S3 User Guide.
This action requires permissions for the s3:GetReplicationConfiguration
action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter
element in a replication configuration, you must also include the DeleteMarkerReplication
and Priority
elements. The response also returns those elements.
For information about GetBucketReplication
errors, see List of replication-related error codes
The following operations are related to GetBucketReplication
:
This operation is not supported by directory buckets.
Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.
The following operations are related to GetBucketRequestPayment
:
This operation is not supported for directory buckets.
Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.
The following operations are related to GetBucketRequestPayment
:
This operation is not supported for directory buckets.
Returns the tag set associated with the bucket.
To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging has the following special error:
Error code: NoSuchTagSet
Description: There is no tag set associated with the bucket.
The following operations are related to GetBucketTagging:
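A short boto3 sketch (bucket name is a placeholder) that also handles the NoSuchTagSet special error noted above::

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client("s3")
    try:
        tags = s3.get_bucket_tagging(Bucket="amzn-s3-demo-bucket")["TagSet"]
    except ClientError as e:
        if e.response["Error"]["Code"] == "NoSuchTagSet":
            tags = []  # No tag set is associated with the bucket.
        else:
            raise
    print(tags)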
This operation is not supported for directory buckets.
Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.
The following operations are related to GetBucketVersioning:
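An illustrative boto3 call (placeholder bucket name); note that the Status element is absent if versioning has never been configured::

    import boto3

    s3 = boto3.client("s3")
    resp = s3.get_bucket_versioning(Bucket="amzn-s3-demo-bucket")
    # "Status" is "Enabled" or "Suspended"; absent for a never-versioned bucket.
    print(resp.get("Status", "Unversioned"), resp.get("MFADelete", "Disabled"))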
This operation is not supported for directory buckets.
Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET action requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy that grants them the S3:GetBucketWebsite permission.
The following operations are related to GetBucketWebsite:
This operation is not supported for directory buckets.
Returns the access control list (ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide.
This functionality is not supported for Amazon S3 on Outposts.
By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
The following operations are related to GetObjectAcl:
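A minimal boto3 sketch (bucket and key names are placeholders); pass VersionId to read the ACL of a non-current version::

    import boto3

    s3 = boto3.client("s3")
    resp = s3.get_object_acl(Bucket="amzn-s3-demo-bucket", Key="example-key")
    for grant in resp["Grants"]:
        print(grant["Permission"], grant["Grantee"])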
This operation is not supported for directory buckets.
Gets an object's current legal hold status. For more information, see Locking Objects.
This functionality is not supported for Amazon S3 on Outposts.
The following action is related to GetObjectLegalHold:
This operation is not supported for directory buckets.
Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
The following action is related to GetObjectLockConfiguration:
This operation is not supported for directory buckets.
Retrieves an object's retention settings. For more information, see Locking Objects.
This functionality is not supported for Amazon S3 on Outposts.
The following action is related to GetObjectRetention:
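A hedged boto3 sketch (placeholder names); this assumes the bucket has Object Lock enabled and the object actually has a retention setting, otherwise the call raises an error::

    import boto3

    s3 = boto3.client("s3")
    resp = s3.get_object_retention(Bucket="amzn-s3-demo-bucket", Key="example-key")
    retention = resp["Retention"]
    print(retention["Mode"], retention["RetainUntilDate"])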
This operation is not supported for directory buckets.
Returns the tag set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET action returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following actions are related to GetObjectTagging:
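A minimal boto3 sketch (placeholder names); the versionId query parameter maps to the VersionId argument for non-current versions::

    import boto3

    s3 = boto3.client("s3")
    resp = s3.get_object_tagging(Bucket="amzn-s3-demo-bucket", Key="example-key")
    for tag in resp["TagSet"]:
        print(tag["Key"], "=", tag["Value"])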
This operation is not supported for directory buckets.
Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files.
You can get a torrent only for objects that are less than 5 GB in size and that are not encrypted using server-side encryption with a customer-provided encryption key.
To use GET, you must have READ access to the object.
This functionality is not supported for Amazon S3 on Outposts.
The following action is related to GetObjectTorrent:
This operation is not supported for directory buckets.
Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
The following operations are related to GetPublicAccessBlock:
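A minimal boto3 sketch (placeholder bucket name)::

    import boto3

    s3 = boto3.client("s3")
    resp = s3.get_public_access_block(Bucket="amzn-s3-demo-bucket")
    # Four boolean settings; S3 combines these with the account-level settings
    # and applies the most restrictive combination.
    print(resp["PublicAccessBlockConfiguration"])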
The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.
If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
If you enable x-amz-checksum-mode in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you'll get an HTTP 400 Bad Request error, because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.
Directory buckets - Delete markers are not supported for directory buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null for the versionId query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The following actions are related to HeadObject:
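A hedged boto3 sketch (placeholder names) showing the permission-dependent error behavior described above; for SSE-C objects you would also pass the customer-key parameters::

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client("s3")
    try:
        # For SSE-C objects, also pass SSECustomerAlgorithm="AES256" and
        # SSECustomerKey=<key>; boto3 computes the key MD5 header for you.
        resp = s3.head_object(Bucket="amzn-s3-demo-bucket", Key="example-key")
        print(resp["ContentLength"], resp["LastModified"])
    except ClientError as e:
        # With s3:ListBucket you get 404 for a missing key; without it, 403.
        print("HEAD failed:", e.response["Error"]["Code"])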
This operation is not supported for directory buckets.
Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to ListBucketAnalyticsConfigurations:
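A minimal boto3 pagination sketch (placeholder bucket name) following the IsTruncated/NextContinuationToken protocol described above::

    import boto3

    s3 = boto3.client("s3")
    configs, kwargs = [], {"Bucket": "amzn-s3-demo-bucket"}
    while True:
        resp = s3.list_bucket_analytics_configurations(**kwargs)
        configs.extend(resp.get("AnalyticsConfigurationList", []))
        if not resp.get("IsTruncated"):
            break
        # Pass NextContinuationToken back as ContinuationToken for the next page.
        kwargs["ContinuationToken"] = resp["NextContinuationToken"]
    print(len(configs))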
This operation is not supported for directory buckets.
Lists the S3 Intelligent-Tiering configurations from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low-latency and high-throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to ListBucketIntelligentTieringConfigurations include:
This operation is not supported for directory buckets.
Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to ListBucketInventoryConfigurations:
This operation is not supported for directory buckets.
Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to ListBucketMetricsConfigurations:
This operation is not supported for directory buckets.
Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.
For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.
We strongly recommend using only paginated ListBuckets requests. Unpaginated ListBuckets requests are only supported for Amazon Web Services accounts set to the default general purpose bucket quota of 10,000. If you have an approved general purpose bucket quota above 10,000, you must send paginated ListBuckets requests to list your account's buckets. All unpaginated ListBuckets requests will be rejected for Amazon Web Services accounts with a general purpose bucket quota greater than 10,000.
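A hedged pagination sketch with boto3; this assumes an SDK version that exposes the MaxBuckets and ContinuationToken parameters for ListBuckets (present in recent botocore releases)::

    import boto3

    s3 = boto3.client("s3")
    buckets, kwargs = [], {"MaxBuckets": 1000}
    while True:
        resp = s3.list_buckets(**kwargs)
        buckets.extend(resp.get("Buckets", []))
        # The response's ContinuationToken, when present, requests the next page.
        token = resp.get("ContinuationToken")
        if not token:
            break
        kwargs["ContinuationToken"] = token
    print([b["Name"] for b in buckets])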
This operation is not supported for directory buckets.
Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.
To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference.
A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.
To use this operation, you must have READ access to the bucket.
The following operations are related to ListObjectVersions:
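A minimal boto3 sketch using the built-in paginator (bucket name and prefix are placeholders); pages carry both Versions and DeleteMarkers::

    import boto3

    s3 = boto3.client("s3")
    paginator = s3.get_paginator("list_object_versions")
    for page in paginator.paginate(Bucket="amzn-s3-demo-bucket", Prefix="logs/"):
        for version in page.get("Versions", []):
            print(version["Key"], version["VersionId"], version["IsLatest"])
        for marker in page.get("DeleteMarkers", []):
            print("delete marker:", marker["Key"], marker["VersionId"])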
This operation is not supported for directory buckets.
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.
This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.
The following operations are related to ListObjects:
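Since the newer ListObjectsV2 is recommended, a minimal boto3 sketch (placeholder bucket name) uses its paginator rather than the legacy call::

    import boto3

    s3 = boto3.client("s3")
    paginator = s3.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket="amzn-s3-demo-bucket"):
        for obj in page.get("Contents", []):
            print(obj["Key"], obj["Size"])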
This operation is not supported for directory buckets.
Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The Transfer Acceleration state of a bucket can be set to one of the following two values:
Enabled – Enables accelerated data transfers to the bucket.
Suspended – Disables accelerated data transfers to the bucket.
The GetBucketAccelerateConfiguration action returns the transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").
For more information about transfer acceleration, see Transfer Acceleration.
The following operations are related to PutBucketAccelerateConfiguration:
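A minimal boto3 sketch (placeholder bucket name) that enables acceleration and reads the state back::

    import boto3

    s3 = boto3.client("s3")
    # Status must be "Enabled" or "Suspended"; the bucket name must be
    # DNS-compliant and must not contain periods.
    s3.put_bucket_accelerate_configuration(
        Bucket="amzn-s3-demo-bucket",
        AccelerateConfiguration={"Status": "Enabled"},
    )
    print(s3.get_bucket_accelerate_configuration(Bucket="amzn-s3-demo-bucket"))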
This operation is not supported for directory buckets.
Sets the permissions on an existing bucket using access control lists (ACLs). For more information, see Using ACLs. To set the ACL of a bucket, you must have the WRITE_ACP permission.
You can use one of the following two ways to set a bucket's permissions:
Specify the ACL in the request body
Specify permissions using request headers
You cannot specify access permissions using both the body and the request headers.
Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.
You can set access permissions by using one of the following methods:
Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an Amazon Web Services account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to the LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their IDs:
x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
The following operations are related to PutBucketAcl:
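A hedged boto3 sketch (bucket name and grantee IDs are placeholders) showing the two mutually exclusive approaches: a canned ACL versus explicit x-amz-grant-* headers::

    import boto3

    s3 = boto3.client("s3")
    # Option 1: a canned ACL via the x-amz-acl header.
    s3.put_bucket_acl(Bucket="amzn-s3-demo-bucket", ACL="private")
    # Option 2: explicit grants via x-amz-grant-* headers (cannot be combined
    # with a canned ACL). This grants write access to the LogDelivery group.
    s3.put_bucket_acl(
        Bucket="amzn-s3-demo-bucket",
        GrantWrite='uri="http://acs.amazonaws.com/groups/s3/LogDelivery"',
        GrantFullControl='id="111122223333"',  # placeholder canonical ID
    )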
This operation is not supported for directory buckets.
Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.
You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.
You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketAnalyticsConfiguration has the following special errors:
HTTP Error: HTTP 400 Bad Request
Code: InvalidArgument
Cause: Invalid argument.
HTTP Error: HTTP 400 Bad Request
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Error: HTTP 403 Forbidden
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.
The following operations are related to PutBucketAnalyticsConfiguration:
This operation is not supported for directory buckets.
Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin header must match an AllowedOrigin element.
The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in the case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.
Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.
For more information about CORS, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.
The following operations are related to PutBucketCors:
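A minimal boto3 sketch (bucket name and origin are placeholders) expressing one CORSRule with the matching elements described above::

    import boto3

    s3 = boto3.client("s3")
    s3.put_bucket_cors(
        Bucket="amzn-s3-demo-bucket",
        CORSConfiguration={
            "CORSRules": [
                {
                    # Matched against the request's Origin header.
                    "AllowedOrigins": ["http://www.example.com"],
                    "AllowedMethods": ["GET", "PUT"],
                    "AllowedHeaders": ["*"],
                    "MaxAgeSeconds": 3000,
                }
            ]
        },
    )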
This operation is not supported for directory buckets.
Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low-latency and high-throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to PutBucketIntelligentTieringConfiguration include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier.
PutBucketIntelligentTieringConfiguration has the following special errors:
Code: InvalidArgument
Cause: Invalid Argument
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
Code: AccessDeniedException
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration on the bucket.
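A hedged boto3 sketch of the call, assuming an illustrative bucket name, prefix, and configuration ID; the 90-day tiering moves untouched objects to the Archive Access tier:

import boto3

s3 = boto3.client("s3")

# Archive objects under the placeholder prefix after 90 days without access.
s3.put_bucket_intelligent_tiering_configuration(
    Bucket="amzn-s3-demo-bucket",
    Id="archive-logs",
    IntelligentTieringConfiguration={
        "Id": "archive-logs",
        "Filter": {"Prefix": "logs/"},
        "Status": "Enabled",
        "Tierings": [{"Days": 90, "AccessTier": "ARCHIVE_ACCESS"}],
    },
)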
This operation is not supported for directory buckets.
This implementation of the PUT action adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same Amazon Web Services Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon S3 User Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permission to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others.
The s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory report that includes all object metadata fields available and to specify the destination bucket to store the inventory. A user with read access to objects in the destination bucket can also access all object metadata fields that are available in the inventory report.
To restrict access to an inventory report, see Restricting access to an Amazon S3 Inventory report in the Amazon S3 User Guide. For more information about the metadata fields available in S3 Inventory, see Amazon S3 Inventory lists in the Amazon S3 User Guide. For more information about permissions, see Permissions related to bucket subresource operations and Identity and access management in Amazon S3 in the Amazon S3 User Guide.
PutBucketInventoryConfiguration has the following special errors:
Code: InvalidArgument
Cause: Invalid Argument
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket.
The following operations are related to PutBucketInventoryConfiguration:
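As a sketch, a daily CSV inventory configured via boto3; the source and destination names are placeholders, and the destination bucket still needs the bucket policy described above:

import boto3

s3 = boto3.client("s3")

# Daily CSV inventory of current versions, delivered to a placeholder
# destination bucket in the same Region.
s3.put_bucket_inventory_configuration(
    Bucket="amzn-s3-demo-source",
    Id="daily-inventory",
    InventoryConfiguration={
        "Id": "daily-inventory",
        "IsEnabled": True,
        "IncludedObjectVersions": "Current",
        "Schedule": {"Frequency": "Daily"},
        "Destination": {
            "S3BucketDestination": {
                "Bucket": "arn:aws:s3:::amzn-s3-demo-destination",
                "Format": "CSV",
            }
        },
    },
)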
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
This operation is not supported for directory buckets.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the Amazon Web Services account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions: s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifecycleConfiguration.
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
The following operations are related to PutBucketLifecycle: GetBucketLifecycle (deprecated).
By default, a resource owner (in this case, a bucket owner, which is the Amazon Web Services account that created the bucket) can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon S3 User Guide:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.
Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility for general purpose buckets. For the related API description, see PutBucketLifecycle.
Lifecycle configurations for directory buckets only support expiring objects and cancelling multipart uploads. Expiring of versioned objects, transitions, and tag filters are not supported.
A lifecycle rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions: s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifecycleConfiguration.
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.
For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
The following operations are related to PutBucketLifecycleConfiguration:
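A minimal boto3 sketch of a general purpose bucket configuration, using a placeholder bucket and prefix; note that this body replaces any existing configuration, per the warning above:

import boto3

s3 = boto3.client("s3")

# One rule: expire objects under the prefix after 365 days and abort
# incomplete multipart uploads after 7 days.
s3.put_bucket_lifecycle_configuration(
    Bucket="amzn-s3-demo-bucket",
    LifecycleConfiguration={
        "Rules": [
            {
                "ID": "expire-old-logs",
                "Filter": {"Prefix": "logs/"},
                "Status": "Enabled",
                "Expiration": {"Days": 365},
                "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 7},
            }
        ]
    },
)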
This operation is not supported for directory buckets.
Sets the logging parameters for a bucket and specifies permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.
The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.
If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.
You can specify the person (grantee) to whom you're assigning access rights (by using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
DisplayName is optional and ignored in the request.
By Email address:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GETObjectAcl request, appears as the CanonicalUser.
By URI:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
To enable logging, you use the LoggingEnabled request element and its children. To disable logging, you use an empty BucketLoggingStatus request element:
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.
For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.
The following operations are related to PutBucketLogging:
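For illustration, enabling server access logging with boto3; both bucket names are placeholders:

import boto3

s3 = boto3.client("s3")

# Enable server access logging to a placeholder target bucket; pass an
# empty BucketLoggingStatus ({}) to disable it again.
s3.put_bucket_logging(
    Bucket="amzn-s3-demo-bucket",
    BucketLoggingStatus={
        "LoggingEnabled": {
            "TargetBucket": "amzn-s3-demo-logs",
            "TargetPrefix": "access-logs/",
        }
    },
)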
This operation is not supported for directory buckets.
Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to PutBucketMetricsConfiguration:
PutBucketMetricsConfiguration has the following special error:
Error code: TooManyConfigurations
Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Status Code: HTTP 400 Bad Request
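A small boto3 sketch with an illustrative bucket, ID, and filter prefix; because the call is a full replacement, resend any elements you want to keep:

import boto3

s3 = boto3.client("s3")

# Request metrics scoped to objects under the placeholder prefix.
s3.put_bucket_metrics_configuration(
    Bucket="amzn-s3-demo-bucket",
    Id="documents-metrics",
    MetricsConfiguration={
        "Id": "documents-metrics",
        "Filter": {"Prefix": "documents/"},
    },
)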
This operation is not supported for directory buckets.
No longer used, see the PutBucketNotificationConfiguration operation.
",
      "deprecated":true,
      "httpChecksum":{
        "requestAlgorithmMember":"ChecksumAlgorithm",
@@ -1103,7 +1103,7 @@
        "requestUri":"/{Bucket}?notification"
      },
      "input":{"shape":"PutBucketNotificationConfigurationRequest"},
This operation is not supported for directory buckets.
Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration:
<NotificationConfiguration>
</NotificationConfiguration>
This action replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
For more information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in the Amazon Web Services General Reference.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with the required s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
The following action is related to PutBucketNotificationConfiguration:
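A boto3 sketch that wires object-created events to a placeholder SQS queue ARN; the queue policy must already allow Amazon S3 to send messages, or the test notification described above fails and the whole request is rejected:

import boto3

s3 = boto3.client("s3")

s3.put_bucket_notification_configuration(
    Bucket="amzn-s3-demo-bucket",
    NotificationConfiguration={
        "QueueConfigurations": [
            {
                "QueueArn": "arn:aws:sqs:us-east-1:111122223333:demo-queue",
                "Events": ["s3:ObjectCreated:*"],
            }
        ]
    },
)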
This operation is not supported for directory buckets.
Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.
For information about Amazon S3 Object Ownership, see Using object ownership.
The following operations are related to PutBucketOwnershipControls:
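For example, enforcing bucket-owner ownership (which disables ACLs) looks like this in boto3; the bucket name is illustrative:

import boto3

s3 = boto3.client("s3")

# BucketOwnerEnforced disables ACLs; other values are
# BucketOwnerPreferred and ObjectWriter.
s3.put_bucket_ownership_controls(
    Bucket="amzn-s3-demo-bucket",
    OwnershipControls={"Rules": [{"ObjectOwnership": "BucketOwnerEnforced"}]},
)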
This operation is not supported for directory buckets.
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 User Guide.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information. You can invoke this request for a specific Amazon Web Services Region by using the aws:RequestedRegion condition key.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.
For information about enabling versioning on a bucket, see Using Versioning.
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using KMS keys.
For information on PutBucketReplication errors, see List of replication-related error codes.
To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration permissions for the bucket.
By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
To perform this operation, the user or role performing the action must have the iam:PassRole permission.
The following operations are related to PutBucketReplication:
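A hedged boto3 sketch; the role and destination ARNs are placeholders, and the rule shape follows the Filter requirements described above (DeleteMarkerReplication, Status, and Priority accompany the Filter):

import boto3

s3 = boto3.client("s3")

# Replicate everything (empty Filter) to a placeholder destination; the
# role must be assumable by Amazon S3 and is illustrative here.
s3.put_bucket_replication(
    Bucket="amzn-s3-demo-source",
    ReplicationConfiguration={
        "Role": "arn:aws:iam::111122223333:role/s3-replication-role",
        "Rules": [
            {
                "ID": "replicate-all",
                "Priority": 1,
                "Filter": {},
                "Status": "Enabled",
                "DeleteMarkerReplication": {"Status": "Disabled"},
                "Destination": {"Bucket": "arn:aws:s3:::amzn-s3-demo-destination"},
            }
        ],
    },
)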
This operation is not supported for directory buckets.
Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.
The following operations are related to PutBucketRequestPayment:
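In boto3 this is a one-call operation; the bucket name is a placeholder:

import boto3

s3 = boto3.client("s3")

# Shift download charges to the requester; use {"Payer": "BucketOwner"}
# to restore the default.
s3.put_bucket_request_payment(
    Bucket="amzn-s3-demo-bucket",
    RequestPaymentConfiguration={"Payer": "Requester"},
)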
This operation is not supported for directory buckets.
Sets the tags for a bucket.
Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.
When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors. For more Amazon S3 errors, see Error Responses.
InvalidTag - The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
MalformedXML - The XML provided does not match the schema.
OperationAborted - A conflicting conditional action is currently in progress against this resource. Please try again.
InternalError - The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging:
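A boto3 sketch with placeholder tag keys and values; remember that the tag set is replaced wholesale:

import boto3

s3 = boto3.client("s3")

# The TagSet replaces all existing bucket tags, so include every tag
# you want to keep.
s3.put_bucket_tagging(
    Bucket="amzn-s3-demo-bucket",
    Tagging={
        "TagSet": [
            {"Key": "project", "Value": "phoenix"},
            {"Key": "cost-center", "Value": "1234"},
        ]
    },
)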
This operation is not supported for directory buckets.
When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket.
Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled - Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended - Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
The following operations are related to PutBucketVersioning:
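A minimal boto3 sketch; the bucket name is illustrative:

import boto3

s3 = boto3.client("s3")

# Turn versioning on; pass "Suspended" to stop creating new versions.
s3.put_bucket_versioning(
    Bucket="amzn-s3-demo-bucket",
    VersioningConfiguration={"Status": "Enabled"},
)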
", + "documentation":"This operation is not supported for directory buckets.
Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite
permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon S3 User Guide.
The maximum request length is limited to 128 KB.
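For illustration, a basic index/error configuration in boto3 with placeholder bucket and document names; add RoutingRules entries for the conditional redirects described above:

import boto3

s3 = boto3.client("s3")

s3.put_bucket_website(
    Bucket="amzn-s3-demo-bucket",
    WebsiteConfiguration={
        "IndexDocument": {"Suffix": "index.html"},
        "ErrorDocument": {"Key": "error.html"},
    },
)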
", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1230,6 +1230,12 @@ }, "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, + "errors":[ + {"shape":"InvalidRequest"}, + {"shape":"InvalidWriteOffset"}, + {"shape":"TooManyParts"}, + {"shape":"EncryptionTypeMismatch"} + ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", "documentation":"Adds an object to a bucket.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.
If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:
S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide. This functionality is not supported for directory buckets.
S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning. This functionality is not supported for directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your PutObject request includes specific headers:
s3:PutObject - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object to it.
s3:PutObjectAcl - To successfully change the object's ACL with your PutObject request, you must have the s3:PutObjectAcl permission.
s3:PutObjectTagging - To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
Directory bucket - This functionality is not supported for directory buckets.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
For more information about related Amazon S3 APIs, see the following:
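A boto3 sketch of an upload that also sends the Content-MD5 integrity check described above; the bucket and key are placeholders:

import base64
import hashlib

import boto3

s3 = boto3.client("s3")

body = b"hello world"

# Content-MD5 is the base64-encoded 128-bit MD5 digest of the body;
# Amazon S3 rejects the request if the payload does not match it.
s3.put_object(
    Bucket="amzn-s3-demo-bucket",
    Key="greetings/hello.txt",
    Body=body,
    ContentMD5=base64.b64encode(hashlib.md5(body).digest()).decode("ascii"),
)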
", "httpChecksum":{ @@ -1249,7 +1255,7 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "documentation":"This operation is not supported by directory buckets.
Uses the acl
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have the WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.
This functionality is not supported for Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported
error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-ac
l. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl
header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an Amazon Web Services account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read
header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId
subresource.
The following operations are related to PutObjectAcl
:
This operation is not supported for directory buckets.
Uses the acl
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have the WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.
This functionality is not supported for Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported
error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl
header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an Amazon Web Services account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read
header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId
subresource.
The following operations are related to PutObjectAcl
:
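As an illustration of the two mutually exclusive approaches described above, here is a minimal boto3 sketch of PutObjectAcl; the bucket name, key, and grantee values are placeholders.

```python
import boto3

s3 = boto3.client("s3")

# Option 1: a canned ACL via the x-amz-acl header (the ACL parameter).
s3.put_object_acl(Bucket="amzn-s3-demo-bucket", Key="report.csv", ACL="public-read")

# Option 2: explicit x-amz-grant-* headers. These cannot be combined
# with a canned ACL in the same request.
s3.put_object_acl(
    Bucket="amzn-s3-demo-bucket",
    Key="report.csv",
    GrantRead='uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"',
    GrantReadACP='id="CANONICAL_USER_ID"',  # placeholder canonical user ID
)
```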
This operation is not supported by directory buckets.
Applies a legal hold configuration to the specified object. For more information, see Locking Objects.
This functionality is not supported for Amazon S3 on Outposts.
", + "documentation":"This operation is not supported for directory buckets.
Applies a legal hold configuration to the specified object. For more information, see Locking Objects.
This functionality is not supported for Amazon S3 on Outposts.
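A minimal boto3 sketch of applying a legal hold (bucket and key are placeholders):

```python
import boto3

s3 = boto3.client("s3")

# Turn a legal hold ON for the current version of an object;
# "OFF" releases it.
s3.put_object_legal_hold(
    Bucket="amzn-s3-demo-bucket",
    Key="evidence/2024-11-22.log",
    LegalHold={"Status": "ON"},
)
```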
", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1277,7 +1283,7 @@ }, "input":{"shape":"PutObjectLockConfigurationRequest"}, "output":{"shape":"PutObjectLockConfigurationOutput"}, - "documentation":"This operation is not supported by directory buckets.
Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
The DefaultRetention
settings require both a mode and a period.
The DefaultRetention
period can be either Days
or Years
but you must select one. You cannot specify Days
and Years
at the same time.
You can enable Object Lock for new or existing buckets. For more information, see Configuring Object Lock.
This operation is not supported for directory buckets.
Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
The DefaultRetention
settings require both a mode and a period.
The DefaultRetention
period can be either Days
or Years
but you must select one. You cannot specify Days
and Years
at the same time.
You can enable Object Lock for new or existing buckets. For more information, see Configuring Object Lock.
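For example, a default 30-day GOVERNANCE retention rule could be applied with boto3 roughly as follows (the bucket name is a placeholder):

```python
import boto3

s3 = boto3.client("s3")

# DefaultRetention requires a Mode plus exactly one of Days or Years.
s3.put_object_lock_configuration(
    Bucket="amzn-s3-demo-bucket",
    ObjectLockConfiguration={
        "ObjectLockEnabled": "Enabled",
        "Rule": {"DefaultRetention": {"Mode": "GOVERNANCE", "Days": 30}},
    },
)
```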
This operation is not supported by directory buckets.
Places an Object Retention configuration on an object. For more information, see Locking Objects. Users or accounts require the s3:PutObjectRetention
permission in order to place an Object Retention configuration on objects. Bypassing a Governance Retention configuration requires the s3:BypassGovernanceRetention
permission.
This functionality is not supported for Amazon S3 on Outposts.
", + "documentation":"This operation is not supported for directory buckets.
Places an Object Retention configuration on an object. For more information, see Locking Objects. Users or accounts require the s3:PutObjectRetention
permission in order to place an Object Retention configuration on objects. Bypassing a Governance Retention configuration requires the s3:BypassGovernanceRetention
permission.
This functionality is not supported for Amazon S3 on Outposts.
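A rough boto3 sketch of placing an Object Retention configuration (bucket, key, and date are placeholders):

```python
import boto3
from datetime import datetime, timezone

s3 = boto3.client("s3")

s3.put_object_retention(
    Bucket="amzn-s3-demo-bucket",
    Key="invoices/2024-11.pdf",
    Retention={
        "Mode": "GOVERNANCE",
        "RetainUntilDate": datetime(2025, 11, 22, tzinfo=timezone.utc),
    },
    # Shortening GOVERNANCE retention would additionally need
    # BypassGovernanceRetention=True and the matching permission.
)
```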
", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1305,7 +1311,7 @@ }, "input":{"shape":"PutObjectTaggingRequest"}, "output":{"shape":"PutObjectTaggingOutput"}, - "documentation":"This operation is not supported by directory buckets.
Sets the supplied tag-set to an object that already exists in a bucket. A tag is a key-value pair. For more information, see Object Tagging.
You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
PutObjectTagging
has the following special errors. For more Amazon S3 errors, see Error Responses.
InvalidTag
- The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
MalformedXML
- The XML provided does not match the schema.
OperationAborted
- A conflicting conditional action is currently in progress against this resource. Please try again.
InternalError
- The service was unable to apply the provided tag to the object.
The following operations are related to PutObjectTagging
:
This operation is not supported for directory buckets.
Sets the supplied tag-set to an object that already exists in a bucket. A tag is a key-value pair. For more information, see Object Tagging.
You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
PutObjectTagging
has the following special errors. For more Amazon S3 errors, see Error Responses.
InvalidTag
- The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
MalformedXML
- The XML provided does not match the schema.
OperationAborted
- A conflicting conditional action is currently in progress against this resource. Please try again.
InternalError
- The service was unable to apply the provided tag to the object.
The following operations are related to PutObjectTagging
:
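As a sketch, tagging an object with boto3 might look like this (names are placeholders):

```python
import boto3

s3 = boto3.client("s3")

# Tags the current version; pass VersionId (and hold the
# s3:PutObjectVersionTagging permission) to tag another version.
s3.put_object_tagging(
    Bucket="amzn-s3-demo-bucket",
    Key="photos/cat.png",
    Tagging={"TagSet": [{"Key": "project", "Value": "demo"}]},  # up to 10 tags
)
```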
This operation is not supported by directory buckets.
Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
The following operations are related to PutPublicAccessBlock
:
This operation is not supported for directory buckets.
Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
The following operations are related to PutPublicAccessBlock
:
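A minimal boto3 sketch that blocks all public access at the bucket level (the bucket name is a placeholder):

```python
import boto3

s3 = boto3.client("s3")

s3.put_public_access_block(
    Bucket="amzn-s3-demo-bucket",
    PublicAccessBlockConfiguration={
        "BlockPublicAcls": True,
        "IgnorePublicAcls": True,
        "BlockPublicPolicy": True,
        "RestrictPublicBuckets": True,
    },
)
```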
This operation is not supported by directory buckets.
Restores an archived copy of an object back into Amazon S3.
This functionality is not supported for Amazon S3 on Outposts.
This action performs the following types of requests:
restore an archive
- Restore an archived object
For more information about the S3
structure in the request body, see the following:
Managing Access with ACLs in the Amazon S3 User Guide
Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide
To use this operation, you must have permissions to perform the s3:RestoreObject
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object, you can specify one of the following data access tier options in the Tier
element of the request body:
Expedited
- Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard
- Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk
- Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
For more information about archive retrieval options and provisioned capacity for Expedited
data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.
To get the status of object restoration, you can send a HEAD
request. Operations return the x-amz-restore
header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon S3 User Guide.
A successful action returns either the 200 OK
or 202 Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the response.
Special errors:
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
The following operations are related to RestoreObject
:
This operation is not supported for directory buckets.
Restores an archived copy of an object back into Amazon S3.
This functionality is not supported for Amazon S3 on Outposts.
This action performs the following types of requests:
restore an archive
- Restore an archived object
For more information about the S3
structure in the request body, see the following:
Managing Access with ACLs in the Amazon S3 User Guide
Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide
To use this operation, you must have permissions to perform the s3:RestoreObject
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object, you can specify one of the following data access tier options in the Tier
element of the request body:
Expedited
- Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard
- Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk
- Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
For more information about archive retrieval options and provisioned capacity for Expedited
data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.
To get the status of object restoration, you can send a HEAD
request. Operations return the x-amz-restore
header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon S3 User Guide.
A successful action returns either the 200 OK
or 202 Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the response.
Special errors:
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
The following operations are related to RestoreObject
:
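To tie the retrieval tiers and status codes together, here is a hedged boto3 sketch (bucket and key are placeholders):

```python
import boto3

s3 = boto3.client("s3")

# Restore an archived object for 7 days using the Standard tier.
resp = s3.restore_object(
    Bucket="amzn-s3-demo-bucket",
    Key="archive/2019-backup.tar",
    RestoreRequest={
        "Days": 7,
        "GlacierJobParameters": {"Tier": "Standard"},  # or Expedited / Bulk
    },
)
# 202 Accepted: restore initiated; 200 OK: the object was already restored.
print(resp["ResponseMetadata"]["HTTPStatusCode"])

# Poll the x-amz-restore header via HEAD to track restoration progress.
print(s3.head_object(Bucket="amzn-s3-demo-bucket",
                     Key="archive/2019-backup.tar").get("Restore"))
```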
This operation is not supported by directory buckets.
This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This functionality is not supported for Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.
You must have the s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.
Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding
header with chunked
as its value in the response. For more information, see Appendix: SelectObjectContent Response.
The SelectObjectContent
action does not support the following GetObject
functionality. For more information, see GetObject.
Range
: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
The GLACIER
, DEEP_ARCHIVE
, and REDUCED_REDUNDANCY
storage classes, or the ARCHIVE_ACCESS
and DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class: You cannot query objects in the GLACIER
, DEEP_ARCHIVE
, or REDUCED_REDUNDANCY
storage classes, nor objects in the ARCHIVE_ACCESS
or DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide.
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
The following operations are related to SelectObjectContent
:
This operation is not supported for directory buckets.
This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This functionality is not supported for Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.
You must have the s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.
Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding
header with chunked
as its value in the response. For more information, see Appendix: SelectObjectContent Response.
The SelectObjectContent
action does not support the following GetObject
functionality. For more information, see GetObject.
Range
: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
The GLACIER
, DEEP_ARCHIVE
, and REDUCED_REDUNDANCY
storage classes, or the ARCHIVE_ACCESS
and DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class: You cannot query objects in the GLACIER
, DEEP_ARCHIVE
, or REDUCED_REDUNDANCY
storage classes, nor objects in the ARCHIVE_ACCESS
or DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide.
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
The following operations are related to SelectObjectContent
:
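A small boto3 sketch of SelectObjectContent over a CSV object, reading the streamed event payload (bucket, key, and query are placeholders):

```python
import boto3

s3 = boto3.client("s3")

resp = s3.select_object_content(
    Bucket="amzn-s3-demo-bucket",
    Key="data/users.csv",
    ExpressionType="SQL",
    Expression="SELECT s.name FROM S3Object s WHERE s.age > '30'",
    InputSerialization={"CSV": {"FileHeaderInfo": "USE"}},
    OutputSerialization={"JSON": {}},
)
# The response arrives as a chunked event stream.
for event in resp["Payload"]:
    if "Records" in event:
        print(event["Records"]["Payload"].decode("utf-8"), end="")
```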
This operation is not supported by directory buckets.
Passes transformed objects to a GetObject
operation when using Object Lambda access points. For information about Object Lambda access points, see Transforming objects with Object Lambda access points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to RequestRoute
, RequestToken
, StatusCode
, ErrorCode
, and ErrorMessage
. The GetObject
response metadata is supported so that the WriteGetObjectResponse
caller, typically a Lambda function, can provide the same metadata when it internally invokes GetObject
. When WriteGetObjectResponse
is called by a customer-owned Lambda function, the metadata returned to the end user GetObject
call might differ from what Amazon S3 would normally return.
You can include any number of metadata headers. When including a metadata header, it should be prefaced with x-amz-meta
. For example, x-amz-meta-my-custom-header: MyCustomValue
. The primary use case for this is to forward GetObject
metadata.
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact personally identifiable information (PII) and decompress S3 objects. These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point.
Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard, and ZIP.
For information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.
", + "documentation":"This operation is not supported for directory buckets.
Passes transformed objects to a GetObject
operation when using Object Lambda access points. For information about Object Lambda access points, see Transforming objects with Object Lambda access points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to RequestRoute
, RequestToken
, StatusCode
, ErrorCode
, and ErrorMessage
. The GetObject
response metadata is supported so that the WriteGetObjectResponse
caller, typically a Lambda function, can provide the same metadata when it internally invokes GetObject
. When WriteGetObjectResponse
is called by a customer-owned Lambda function, the metadata returned to the end user GetObject
call might differ from what Amazon S3 would normally return.
You can include any number of metadata headers. When including a metadata header, it should be prefaced with x-amz-meta
. For example, x-amz-meta-my-custom-header: MyCustomValue
. The primary use case for this is to forward GetObject
metadata.
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact personally identifiable information (PII) and decompress S3 objects. These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point.
Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard, and ZIP.
For information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.
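As a hedged sketch of the WriteGetObjectResponse flow described above, an Object Lambda handler might fetch the original object and return a transformed copy; the event field names follow the documented S3 Object Lambda event shape, and the transformation is purely illustrative.

```python
import urllib.request

import boto3

s3 = boto3.client("s3")

def lambda_handler(event, context):
    # S3 provides a presigned URL for the original object plus routing tokens.
    ctx = event["getObjectContext"]
    original = urllib.request.urlopen(ctx["inputS3Url"]).read()

    s3.write_get_object_response(
        RequestRoute=ctx["outputRoute"],
        RequestToken=ctx["outputToken"],
        Body=original.upper(),  # illustrative transformation
        Metadata={"my-custom-header": "MyCustomValue"},  # sent as x-amz-meta-*
    )
    return {"statusCode": 200}
```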
", "authtype":"v4-unsigned-body", "endpoint":{ "hostPrefix":"{RequestRoute}." @@ -1467,6 +1473,12 @@ "documentation":"The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
If present, this header aborts an in-progress multipart upload only if it was initiated on the provided timestamp. If the initiated timestamp of the multipart upload does not match the provided value, the operation returns a 412 Precondition Failed
error. If the initiated timestamp matches or if the multipart upload doesn’t exist, the operation returns a 204 Success (No Content)
response.
This functionality is only supported for directory buckets.
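Assuming the new IfMatchInitiatedTime shape surfaces as a boto3 parameter of the same name, a conditional abort might look like this sketch (all values are placeholders):

```python
import boto3
from datetime import datetime, timezone

s3 = boto3.client("s3")

# Abort only if the upload was initiated at the expected time; a mismatch
# returns 412 Precondition Failed. Directory buckets only.
s3.abort_multipart_upload(
    Bucket="amzn-s3-demo-bucket--usw2-az1--x-s3",
    Key="logs/big-upload.bin",
    UploadId="EXAMPLE_UPLOAD_ID",
    IfMatchInitiatedTime=datetime(2024, 11, 22, 12, 0, tzinfo=timezone.utc),
)
```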
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
The If-Match
header field makes the request method conditional on ETags. If the ETag value does not match, the operation returns a 412 Precondition Failed
error. If the ETag matches or if the object doesn't exist, the operation will return a 204 Success (No Content) response.
For more information about conditional requests, see RFC 7232.
This functionality is only supported for directory buckets.
If present, the object is deleted only if its modification time matches the provided Timestamp
. If the Timestamp
values do not match, the operation returns a 412 Precondition Failed
error. If the Timestamp
matches or if the object doesn’t exist, the operation returns a 204 Success (No Content)
response.
This functionality is only supported for directory buckets.
If present, the object is deleted only if its size matches the provided size in bytes. If the Size
value does not match, the operation returns a 412 Precondition Failed
error. If the Size
matches or if the object doesn’t exist, the operation returns a 204 Success (No Content)
response.
This functionality is only supported for directory buckets.
You can use the If-Match
, x-amz-if-match-last-modified-time
, and x-amz-if-match-size
conditional headers in conjunction with each other or individually.
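Assuming these headers surface in boto3 as the IfMatch, IfMatchLastModifiedTime, and IfMatchSize parameters (names inferred from the model shapes in this diff), a conditional delete might be sketched as:

```python
import boto3

s3 = boto3.client("s3")

# Delete only while the object still has the expected ETag; a mismatch
# returns 412 Precondition Failed. Directory buckets only.
s3.delete_object(
    Bucket="amzn-s3-demo-bucket--usw2-az1--x-s3",
    Key="state/checkpoint.json",
    IfMatch='"0f343b0931126a20f133d67c2b018a3b"',  # placeholder ETag
    IfMatchSize=2048,  # optional extra condition, in bytes
)
```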
Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
The existing object was created with a different encryption type. Subsequent write requests must include the appropriate encryption parameters in the request or while creating the session.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "End":{ "type":"long", "box":true @@ -4233,7 +4271,7 @@ }, "TransitionDefaultMinimumObjectSize":{ "shape":"TransitionDefaultMinimumObjectSize", - "documentation":"Indicates which default minimum object size behavior is applied to the lifecycle configuration.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
Indicates which default minimum object size behavior is applied to the lifecycle configuration.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
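For instance, the default can be selected, and overridden per rule with a size filter, roughly as follows (bucket name and rule values are placeholders):

```python
import boto3

s3 = boto3.client("s3")

s3.put_bucket_lifecycle_configuration(
    Bucket="amzn-s3-demo-bucket",
    TransitionDefaultMinimumObjectSize="varies_by_storage_class",
    LifecycleConfiguration={
        "Rules": [{
            "ID": "archive-old-objects",
            "Status": "Enabled",
            # A custom size filter takes precedence over the default:
            "Filter": {"ObjectSizeGreaterThan": 65536},
            "Transitions": [{"Days": 90, "StorageClass": "GLACIER"}],
        }]
    },
)
```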
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
The creation date of the object.
", + "documentation":"Date and time when the object was last modified.
", "location":"header", "locationName":"Last-Modified" }, @@ -5944,6 +5982,18 @@ "HttpRedirectCode":{"type":"string"}, "ID":{"type":"string"}, "IfMatch":{"type":"string"}, + "IfMatchInitiatedTime":{ + "type":"timestamp", + "timestampFormat":"rfc822" + }, + "IfMatchLastModifiedTime":{ + "type":"timestamp", + "timestampFormat":"rfc822" + }, + "IfMatchSize":{ + "type":"long", + "box":true + }, "IfModifiedSince":{"type":"timestamp"}, "IfNoneMatch":{"type":"string"}, "IfUnmodifiedSince":{"type":"timestamp"}, @@ -6088,6 +6138,22 @@ "error":{"httpStatusCode":403}, "exception":true }, + "InvalidRequest":{ + "type":"structure", + "members":{ + }, + "documentation":"You may receive this error in multiple cases. Depending on the reason for the error, you may receive one of the messages below:
Cannot specify both a write offset value and user-defined object metadata for existing objects.
Checksum Type mismatch occurred, expected checksum Type: sha1, actual checksum Type: crc32c.
Request body cannot be empty when 'write offset' is specified.
The write offset value that you specified does not match the current object size.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "InventoryConfiguration":{ "type":"structure", "required":[ @@ -6343,6 +6409,10 @@ "flattened":true }, "LastModified":{"type":"timestamp"}, + "LastModifiedTime":{ + "type":"timestamp", + "timestampFormat":"rfc822" + }, "LifecycleConfiguration":{ "type":"structure", "required":["Rules"], @@ -6360,7 +6430,7 @@ "members":{ "Date":{ "shape":"Date", - "documentation":"Indicates at what date the object is to be moved or deleted. The date value must conform to the ISO 8601 format. The time is always midnight UTC.
" + "documentation":"Indicates at what date the object is to be moved or deleted. The date value must conform to the ISO 8601 format. The time is always midnight UTC.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.
" + "documentation":"Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Container for the expiration for the lifecycle of the object.
For more information, see Managing your storage lifecycle in the Amazon S3 User Guide.
" @@ -6392,7 +6462,7 @@ }, "Filter":{ "shape":"LifecycleRuleFilter", - "documentation":"The Filter
is used to identify objects that a Lifecycle Rule applies to. A Filter
must have exactly one of Prefix
, Tag
, or And
specified. Filter
is required if the LifecycleRule
does not contain a Prefix
element.
The Filter
is used to identify objects that a Lifecycle Rule applies to. A Filter
must have exactly one of Prefix
, Tag
, or And
specified. Filter
is required if the LifecycleRule
does not contain a Prefix
element.
Tag
filters are not supported for directory buckets.
Specifies when an Amazon S3 object transitions to a specified storage class.
", + "documentation":"Specifies when an Amazon S3 object transitions to a specified storage class.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to a specific storage class at a set period in the object's lifetime.
", + "documentation":"Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to a specific storage class at a set period in the object's lifetime.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
This tag must exist in the object's tag set in order for the rule to apply.
" + "documentation":"This tag must exist in the object's tag set in order for the rule to apply.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. The value must be a non-zero positive integer. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon S3 User Guide.
" + "documentation":"Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. The value must be a non-zero positive integer. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon S3 User Guide.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Specifies how many noncurrent versions Amazon S3 will retain. You can specify up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
" + "documentation":"Specifies how many noncurrent versions Amazon S3 will retain. You can specify up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.
" + "documentation":"Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Version ID for the specific version of the object to delete.
This functionality is not supported for directory buckets.
An entity tag (ETag) is an identifier assigned by a web server to a specific version of a resource found at a URL. This header field makes the request method conditional on ETags
.
Entity tags (ETags) for S3 Express One Zone are random alphanumeric strings unique to the object.
If present, the objects are deleted only if its modification times matches the provided Timestamp
.
This functionality is only supported for directory buckets.
If present, the objects are deleted only if its size matches the provided size in bytes.
This functionality is only supported for directory buckets.
Object Identifier is unique value to identify objects.
" @@ -8589,7 +8671,7 @@ "members":{ "TransitionDefaultMinimumObjectSize":{ "shape":"TransitionDefaultMinimumObjectSize", - "documentation":"Indicates which default minimum object size behavior is applied to the lifecycle configuration.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
Indicates which default minimum object size behavior is applied to the lifecycle configuration.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
Indicates which default minimum object size behavior is applied to the lifecycle configuration.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
Indicates which default minimum object size behavior is applied to the lifecycle configuration.
This parameter applies to general purpose buckets only. It is not supported for directory bucket lifecycle configurations.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
The size of the object in bytes. This will only be present if you append to an object.
This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets.
Specifies the offset for appending data to existing objects in bytes. The offset must be equal to the size of the existing object being appended to. If no object exists, setting this header to 0 will create a new object.
This functionality is only supported for objects in the Amazon S3 Express One Zone storage class in directory buckets.
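Assuming the new WriteOffsetBytes shape maps to a boto3 PutObject parameter of the same name, an append might be sketched as follows (bucket and key are placeholders):

```python
import boto3

s3 = boto3.client("s3")

bucket = "amzn-s3-demo-bucket--usw2-az1--x-s3"  # directory bucket
key = "logs/app.log"

# The offset must equal the current object size; 0 creates a new object.
current_size = s3.head_object(Bucket=bucket, Key=key)["ContentLength"]
s3.put_object(
    Bucket=bucket,
    Key=key,
    Body=b"new log line\n",
    WriteOffsetBytes=current_size,
)
```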
A map of metadata to store with the object in S3.
", @@ -10968,6 +11062,14 @@ "flattened":true }, "Token":{"type":"string"}, + "TooManyParts":{ + "type":"structure", + "members":{ + }, + "documentation":"You have attempted to add more parts than the maximum of 10000 that are allowed for this object. You can use the CopyObject operation to copy this object to another and then add more data to the newly copied object.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "TopicArn":{"type":"string"}, "TopicConfiguration":{ "type":"structure", @@ -11729,6 +11831,10 @@ }, "payload":"Body" }, + "WriteOffsetBytes":{ + "type":"long", + "box":true + }, "Years":{ "type":"integer", "box":true diff --git a/botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json b/botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json index da5d6c97f7..fed0f28108 100644 --- a/botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json +++ b/botocore/data/ssm-quicksetup/2018-05-10/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "NextToken", "limit_key": "MaxItems", "result_key": "ConfigurationManagersList" + }, + "ListConfigurations": { + "input_token": "StartingToken", + "output_token": "NextToken", + "limit_key": "MaxItems", + "result_key": "ConfigurationsList" } } } diff --git a/botocore/data/ssm-quicksetup/2018-05-10/service-2.json b/botocore/data/ssm-quicksetup/2018-05-10/service-2.json index 7d0677cdca..ad19abf68d 100644 --- a/botocore/data/ssm-quicksetup/2018-05-10/service-2.json +++ b/botocore/data/ssm-quicksetup/2018-05-10/service-2.json @@ -51,6 +51,25 @@ "documentation":"Deletes a configuration manager.
", "idempotent":true }, + "GetConfiguration":{ + "name":"GetConfiguration", + "http":{ + "method":"GET", + "requestUri":"/getConfiguration/{ConfigurationId}", + "responseCode":200 + }, + "input":{"shape":"GetConfigurationInput"}, + "output":{"shape":"GetConfigurationOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns details about the specified configuration.
" + }, "GetConfigurationManager":{ "name":"GetConfigurationManager", "http":{ @@ -104,6 +123,24 @@ ], "documentation":"Returns Quick Setup configuration managers.
" }, + "ListConfigurations":{ + "name":"ListConfigurations", + "http":{ + "method":"POST", + "requestUri":"/listConfigurations", + "responseCode":200 + }, + "input":{"shape":"ListConfigurationsInput"}, + "output":{"shape":"ListConfigurationsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Returns configurations deployed by Quick Setup in the requesting Amazon Web Services account and Amazon Web Services Region.
" + }, "ListQuickSetupTypes":{ "name":"ListQuickSetupTypes", "http":{ @@ -298,7 +335,7 @@ }, "Parameters":{ "shape":"ConfigurationParametersMap", - "documentation":"The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. The following tables outline the parameters for each configuration type.
DelegatedAccountId
Description: (Required) The ID of the delegated administrator account.
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
TargetTagKey
Description: (Required) The tag key assigned to the instances you want to target.
TargetTagValue
Description: (Required) The value of the tag key assigned to the instances you want to target.
ICalendarString
Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
UpdateSSMAgent
Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \"true
\".
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
SelectedAggregatorRegion
Description: (Required) The Amazon Web Services Region where you want to create the aggregator index.
ReplaceExistingAggregator
Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the SelectedAggregatorRegion
.
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
DelegatedAccountId
Description: (Required) The ID of the delegated administrator account.
JobFunction
Description: (Required) The name for the Change Manager job function.
PermissionType
Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are CustomPermissions
and AdminPermissions
. The default value for the parameter is CustomerPermissions
.
CustomPermissions
Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify CustomPermissions
for the PermissionType
parameter.
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
AnalyseAllResources
Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all CloudFormation stacks in the account. The default value is \"false
\".
EnableSnsNotifications
Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is \"true
\".
EnableSsmOpsItems
Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is \"true
\".
EnableDriftRemediation
Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \"false
\".
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(14 days)
, rate(1 days)
, and none
. The default value is \"none
\".
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
DelegatedAccountId
Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(14 days)
, rate(2 days)
, and none
. The default value is \"none
\".
CPackNames
Description: (Required) A comma separated list of Config conformance packs.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
RecordAllResources
Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \"true
\".
ResourceTypesToRecord
Description: (Optional) A comma separated list of resource types you want to record.
RecordGlobalResourceTypes
Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \"false
\".
GlobalResourceTypesRegion
Description: (Optional) Determines the Amazon Web Services Region where global resources are recorded.
UseCustomBucket
Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \"false
\".
DeliveryBucketName
Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver configuration snapshots and configuration history files to.
DeliveryBucketPrefix
Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.
NotificationOptions
Description: (Optional) Determines the notification configuration for the recorder. The valid values are NoStreaming
, UseExistingTopic
, and CreateTopic
. The default value is NoStreaming
.
CustomDeliveryTopicAccountId
Description: (Optional) The ID of the Amazon Web Services account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the UseExistingTopic
notification option.
CustomDeliveryTopicName
Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the UseExistingTopic
notification option.
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(7 days)
, rate(1 days)
, and none
. The default value is \"none
\".
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
UpdateSSMAgent
Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \"true
\".
UpdateEc2LaunchAgent
Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \"false
\".
CollectInventory
Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \"true
\".
ScanInstances
Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \"true
\".
InstallCloudWatchAgent
Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \"false
\".
UpdateCloudWatchAgent
Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \"false
\".
IsPolicyAttachAllowed
Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \"false
\".
TargetType
Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *
, InstanceIds
, ResourceGroups
, and Tags
. Use *
to target all instances in the account.
TargetInstances
Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds
for the TargetType
parameter.
TargetTagKey
Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
TargetTagValue
Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
ResourceGroupName
Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups
for the TargetType
parameter.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
PackagesToInstall
Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are AWSEFSTools
, AWSCWAgent
, and AWSEC2LaunchAgent
.
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(14 days)
, rate(2 days)
, and none
. The default value is \"rate(30 days)
\".
IsPolicyAttachAllowed
Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \"false
\".
TargetType
Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *
, InstanceIds
, ResourceGroups
, and Tags
. Use *
to target all instances in the account.
TargetInstances
Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds
for the TargetType
parameter.
TargetTagKey
Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
TargetTagValue
Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
ResourceGroupName
Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups
for the TargetType
parameter.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
PatchPolicyName
Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag.
SelectedPatchBaselines
Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.
PatchBaselineUseDefault
Description: (Optional) A boolean value that determines whether the selected patch baselines are all Amazon Web Services provided.
ConfigurationOptionsPatchOperation
Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are Scan
and ScanAndInstall
. The default value for the parameter is Scan
.
ConfigurationOptionsScanValue
Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.
ConfigurationOptionsInstallValue
Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.
ConfigurationOptionsScanNextInterval
Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \"false
\".
ConfigurationOptionsInstallNextInterval
Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \"false
\".
RebootOption
Description: (Optional) A boolean value that determines whether instances are rebooted after patches are installed. The default value is \"false
\".
IsPolicyAttachAllowed
Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \"false
\".
OutputLogEnableS3
Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.
OutputS3Location
Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.
OutputS3BucketRegion
Description: (Optional) The Amazon Web Services Region where the Amazon S3 bucket you want Config to deliver command output to is located.
OutputS3BucketName
Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver command output to.
OutputS3KeyPrefix
Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.
TargetType
Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *
, InstanceIds
, ResourceGroups
, and Tags
. Use *
to target all instances in the account.
TargetInstances
Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds
for the TargetType
parameter.
TargetTagKey
Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
TargetTagValue
Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
ResourceGroupName
Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups
for the TargetType
parameter.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
The parameters for the configuration definition type. Parameters for configuration definitions vary based on the configuration type. The following tables outline the parameters for each configuration type.
DelegatedAccountId
Description: (Required) The ID of the delegated administrator account.
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
TargetTagKey
Description: (Required) The tag key assigned to the instances you want to target.
TargetTagValue
Description: (Required) The value of the tag key assigned to the instances you want to target.
ICalendarString
Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
UpdateSSMAgent
Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \"true
\".
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
SelectedAggregatorRegion
Description: (Required) The Amazon Web Services Region where you want to create the aggregator index.
ReplaceExistingAggregator
Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the SelectedAggregatorRegion
.
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
DelegatedAccountId
Description: (Required) The ID of the delegated administrator account.
JobFunction
Description: (Required) The name for the Change Manager job function.
PermissionType
Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are CustomPermissions
and AdminPermissions
. The default value for the parameter is CustomPermissions
.
CustomPermissions
Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify CustomPermissions
for the PermissionType
parameter.
TargetOrganizationalUnits
Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
AnalyseAllResources
Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all CloudFormation stacks in the account. The default value is \"false
\".
EnableSnsNotifications
Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is \"true
\".
EnableSsmOpsItems
Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is \"true
\".
EnableDriftRemediation
Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \"false
\".
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(14 days)
, rate(1 days)
, and none
. The default value is \"none
\".
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
DelegatedAccountId
Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(14 days)
, rate(2 days)
, and none
. The default value is \"none
\".
CPackNames
Description: (Required) A comma separated list of Config conformance packs.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
RecordAllResources
Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \"true
\".
ResourceTypesToRecord
Description: (Optional) A comma separated list of resource types you want to record.
RecordGlobalResourceTypes
Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \"false
\".
GlobalResourceTypesRegion
Description: (Optional) Determines the Amazon Web Services Region where global resources are recorded.
UseCustomBucket
Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \"false
\".
DeliveryBucketName
Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver configuration snapshots and configuration history files to.
DeliveryBucketPrefix
Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.
NotificationOptions
Description: (Optional) Determines the notification configuration for the recorder. The valid values are NoStreaming
, UseExistingTopic
, and CreateTopic
. The default value is NoStreaming
.
CustomDeliveryTopicAccountId
Description: (Optional) The ID of the Amazon Web Services account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the UseExistingTopic
notification option.
CustomDeliveryTopicName
Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the UseExistingTopic
notification option.
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(7 days)
, rate(1 days)
, and none
. The default value is \"none
\".
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
UpdateSSMAgent
Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \"true
\".
UpdateEc2LaunchAgent
Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \"false
\".
CollectInventory
Description: (Optional) A boolean value that determines whether inventory is collected from the target instances every 30 minutes. The default value is \"true
\".
ScanInstances
Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \"true
\".
InstallCloudWatchAgent
Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \"false
\".
UpdateCloudWatchAgent
Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \"false
\".
IsPolicyAttachAllowed
Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instance profiles already associated with the target instances. The default value is \"false
\".
TargetType
Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *
, InstanceIds
, ResourceGroups
, and Tags
. Use *
to target all instances in the account.
TargetInstances
Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds
for the TargetType
parameter.
TargetTagKey
Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
TargetTagValue
Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
ResourceGroupName
Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups
for the TargetType
parameter.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
PackagesToInstall
Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are AWSEFSTools
, AWSCWAgent
, and AWSEC2LaunchAgent
.
RemediationSchedule
Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are rate(30 days)
, rate(14 days)
, rate(2 days)
, and none
. The default value is \"rate(30 days)
\".
IsPolicyAttachAllowed
Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instance profiles already associated with the target instances. The default value is \"false
\".
TargetType
Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *
, InstanceIds
, ResourceGroups
, and Tags
. Use *
to target all instances in the account.
TargetInstances
Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds
for the TargetType
parameter.
TargetTagKey
Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
TargetTagValue
Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
ResourceGroupName
Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups
for the TargetType
parameter.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
PatchPolicyName
Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag.
SelectedPatchBaselines
Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.
PatchBaselineUseDefault
Description: (Optional) A boolean value that determines whether the selected patch baselines are all Amazon Web Services provided.
ConfigurationOptionsPatchOperation
Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are Scan
and ScanAndInstall
. The default value for the parameter is Scan
.
ConfigurationOptionsScanValue
Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.
ConfigurationOptionsInstallValue
Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.
ConfigurationOptionsScanNextInterval
Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \"false
\".
ConfigurationOptionsInstallNextInterval
Description: (Optional) A boolean value that determines whether instances should install available patches at the next cron interval. The default value is \"false
\".
RebootOption
Description: (Optional) Determines whether instances are rebooted after patches are installed. Valid values are RebootIfNeeded
and NoReboot
.
IsPolicyAttachAllowed
Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instance profiles already associated with the target instances. The default value is \"false
\".
OutputLogEnableS3
Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.
OutputS3Location
Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.
OutputS3BucketRegion
Description: (Optional) The Amazon Web Services Region where the Amazon S3 bucket you want Config to deliver command output to is located.
OutputS3BucketName
Description: (Optional) The name of the Amazon S3 bucket you want Config to deliver command output to.
OutputS3KeyPrefix
Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.
TargetType
Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are *
, InstanceIds
, ResourceGroups
, and Tags
. Use *
to target all instances in the account.
TargetInstances
Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify InstanceIds
for the TargetType
parameter.
TargetTagKey
Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
TargetTagValue
Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify Tags
for the TargetType
parameter.
ResourceGroupName
Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify ResourceGroups
for the TargetType
parameter.
TargetAccounts
Description: (Optional) The ID of the Amazon Web Services account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either TargetAccounts
or TargetOrganizationalUnits
.
TargetOrganizationalUnits
Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.
TargetRegions
Description: (Required) A comma separated list of Amazon Web Services Regions you want to deploy the configuration to.
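As a concrete illustration of the tables above, a Parameters map for a Host Management-style definition might look like the following in Python. All key names come from the tables; the values are placeholders, and string-typed values are assumed:

```python
# Illustrative Parameters map; values are assumed to be strings.
parameters = {
    "UpdateSSMAgent": "true",
    "CollectInventory": "true",
    "ScanInstances": "true",
    "TargetOrganizationalUnits": "ou-abc1-example1,ou-abc2-example2",
    "TargetRegions": "us-east-1,us-west-2",
}
```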
The ID of the Amazon Web Services account where the configuration was deployed.
" + }, + "ConfigurationDefinitionId":{ + "shape":"String", + "documentation":"The ID of the configuration definition.
" + }, + "CreatedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The datetime stamp when the configuration was created.
" + }, + "FirstClassParameters":{ + "shape":"ConfigurationParametersMap", + "documentation":"The common parameters and values for the configuration definition.
" + }, + "Id":{ + "shape":"String", + "documentation":"A service generated identifier for the configuration.
" + }, + "ManagerArn":{ + "shape":"String", + "documentation":"The ARN of the configuration manager.
" + }, + "Region":{ + "shape":"String", + "documentation":"The Amazon Web Services Region where the configuration was deployed.
" + }, + "StatusSummaries":{ + "shape":"StatusSummariesList", + "documentation":"A summary of the state of the configuration manager. This includes deployment statuses, association statuses, drift statuses, health checks, and more.
" + }, + "Type":{ + "shape":"String", + "documentation":"The type of the Quick Setup configuration.
" + }, + "TypeVersion":{ + "shape":"String", + "documentation":"The version of the Quick Setup type used.
" + } + }, + "documentation":"Details for a Quick Setup configuration.
" + }, + "ConfigurationsList":{ + "type":"list", + "member":{"shape":"ConfigurationSummary"} + }, "ConflictException":{ "type":"structure", "members":{ @@ -524,6 +611,22 @@ "type":"list", "member":{"shape":"Filter"} }, + "GetConfigurationInput":{ + "type":"structure", + "required":["ConfigurationId"], + "members":{ + "ConfigurationId":{ + "shape":"GetConfigurationInputConfigurationIdString", + "documentation":"A service generated identifier for the configuration.
", + "location":"uri", + "locationName":"ConfigurationId" + } + } + }, + "GetConfigurationInputConfigurationIdString":{ + "type":"string", + "pattern":"^[a-zA-Z0-9-_/:]{1,100}$" + }, "GetConfigurationManagerInput":{ "type":"structure", "required":["ManagerArn"], @@ -579,6 +682,55 @@ } } }, + "GetConfigurationOutput":{ + "type":"structure", + "members":{ + "Account":{ + "shape":"String", + "documentation":"The ID of the Amazon Web Services account where the configuration was deployed.
" + }, + "ConfigurationDefinitionId":{ + "shape":"String", + "documentation":"The ID of the configuration definition.
" + }, + "CreatedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The datetime stamp when the configuration manager was created.
" + }, + "Id":{ + "shape":"String", + "documentation":"A service generated identifier for the configuration.
" + }, + "LastModifiedAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"The datetime stamp when the configuration manager was last updated.
" + }, + "ManagerArn":{ + "shape":"String", + "documentation":"The ARN of the configuration manager.
" + }, + "Parameters":{ + "shape":"ConfigurationParametersMap", + "documentation":"The parameters for the configuration definition type.
" + }, + "Region":{ + "shape":"String", + "documentation":"The Amazon Web Services Region where the configuration was deployed.
" + }, + "StatusSummaries":{ + "shape":"StatusSummariesList", + "documentation":"A summary of the state of the configuration manager. This includes deployment statuses, association statuses, drift statuses, health checks, and more.
" + }, + "Type":{ + "shape":"String", + "documentation":"The type of the Quick Setup configuration.
" + }, + "TypeVersion":{ + "shape":"String", + "documentation":"The version of the Quick Setup type used.
" + } + } + }, "GetServiceSettingsOutput":{ "type":"structure", "members":{ @@ -642,6 +794,64 @@ } } }, + "ListConfigurationsInput":{ + "type":"structure", + "members":{ + "ConfigurationDefinitionId":{ + "shape":"ListConfigurationsInputConfigurationDefinitionIdString", + "documentation":"The ID of the configuration definition.
" + }, + "Filters":{ + "shape":"FiltersList", + "documentation":"Filters the results returned by the request.
" + }, + "ManagerArn":{ + "shape":"ListConfigurationsInputManagerArnString", + "documentation":"The ARN of the configuration manager.
" + }, + "MaxItems":{ + "shape":"ListConfigurationsInputMaxItemsInteger", + "documentation":"Specifies the maximum number of configurations that are returned by the request.
" + }, + "StartingToken":{ + "shape":"ListConfigurationsInputStartingTokenString", + "documentation":"The token to use when requesting a specific set of items from a list.
" + } + } + }, + "ListConfigurationsInputConfigurationDefinitionIdString":{ + "type":"string", + "pattern":"^[a-z0-9-]{1,20}$" + }, + "ListConfigurationsInputManagerArnString":{ + "type":"string", + "pattern":"^arn:aws:ssm-quicksetup:([^:]+):(\\d{12}):configuration-manager/[0-9a-fA-F]{8}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{4}\\b-[0-9a-fA-F]{12}$" + }, + "ListConfigurationsInputMaxItemsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListConfigurationsInputStartingTokenString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"^[A-Za-z0-9+=@_|\\/\\s-]*$" + }, + "ListConfigurationsOutput":{ + "type":"structure", + "members":{ + "ConfigurationsList":{ + "shape":"ConfigurationsList", + "documentation":"An array of configurations.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
" + } + } + }, "ListQuickSetupTypesOutput":{ "type":"structure", "members":{ diff --git a/botocore/data/ssm/2014-11-06/paginators-1.json b/botocore/data/ssm/2014-11-06/paginators-1.json index b467911e76..3aafccc6a2 100644 --- a/botocore/data/ssm/2014-11-06/paginators-1.json +++ b/botocore/data/ssm/2014-11-06/paginators-1.json @@ -287,6 +287,18 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "InstanceProperties" + }, + "ListNodes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Nodes" + }, + "ListNodesSummary": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Summary" } } } diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index 2eac00f7a6..de7db8a044 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -239,7 +239,7 @@ {"shape":"ResourceDataSyncAlreadyExistsException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination
and SyncFromSource
.
You can configure Systems Manager Inventory to use the SyncToDestination
type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Creatinga a resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide.
You can configure Systems Manager Explorer to use the SyncFromSource
type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization
by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide.
A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.
By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.
A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination
and SyncFromSource
.
You can configure Systems Manager Inventory to use the SyncToDestination
type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Creating a resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide.
You can configure Systems Manager Explorer to use the SyncFromSource
type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization
by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide.
A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.
By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.
Gets the contents of the specified Amazon Web Services Systems Manager document (SSM document).
" }, + "GetExecutionPreview":{ + "name":"GetExecutionPreview", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetExecutionPreviewRequest"}, + "output":{"shape":"GetExecutionPreviewResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Initiates the process of retrieving an existing preview that shows the effects that running a specified Automation runbook would have on the targeted resources.
" + }, "GetInventory":{ "name":"GetInventory", "http":{ @@ -1538,6 +1552,41 @@ ], "documentation":"A list of inventory items returned by the request.
" }, + "ListNodes":{ + "name":"ListNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListNodesRequest"}, + "output":{"shape":"ListNodesResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidFilter"}, + {"shape":"InvalidNextToken"}, + {"shape":"ResourceDataSyncNotFoundException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Takes in filters and returns a list of managed nodes matching the filter criteria.
" + }, + "ListNodesSummary":{ + "name":"ListNodesSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListNodesSummaryRequest"}, + "output":{"shape":"ListNodesSummaryResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidAggregatorException"}, + {"shape":"InvalidFilter"}, + {"shape":"InvalidNextToken"}, + {"shape":"ResourceDataSyncNotFoundException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"Generates a summary of managed instance/node metadata based on the filters and aggregators you specify. Results are grouped by the input aggregator you specify.
" + }, "ListOpsItemEvents":{ "name":"ListOpsItemEvents", "http":{ @@ -1934,6 +1983,20 @@ ], "documentation":"Creates a change request for Change Manager. The Automation runbooks specified in the change request run only after all required approvals for the change request have been received.
" }, + "StartExecutionPreview":{ + "name":"StartExecutionPreview", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartExecutionPreviewRequest"}, + "output":{"shape":"StartExecutionPreviewResponse"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ValidationException"} + ], + "documentation":"Initiates the process of creating a preview showing the effects that running a specified Automation runbook would have on the targeted resources.
" + }, "StartSession":{ "name":"StartSession", "http":{ @@ -2359,6 +2422,15 @@ "type":"string", "max":10 }, + "AgentType":{ + "type":"string", + "max":255, + "min":1 + }, + "AgentVersion":{ + "type":"string", + "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" + }, "AggregatorSchemaOnly":{"type":"boolean"}, "Alarm":{ "type":"structure", @@ -3480,6 +3552,36 @@ "max":36, "min":36 }, + "AutomationExecutionInputs":{ + "type":"structure", + "members":{ + "Parameters":{ + "shape":"AutomationParameterMap", + "documentation":"Information about parameters that can be specified for the preview operation.
" + }, + "TargetParameterName":{ + "shape":"AutomationParameterKey", + "documentation":"The name of the parameter used as the target resource for the rate-controlled execution. Required if you specify targets.
" + }, + "Targets":{ + "shape":"Targets", + "documentation":"Information about the resources that would be included in the actual runbook execution, if it were to be run. Both Targets and TargetMaps can't be specified together.
" + }, + "TargetMaps":{ + "shape":"TargetMaps", + "documentation":"A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.
" + }, + "TargetLocations":{ + "shape":"TargetLocations", + "documentation":"Information about the Amazon Web Services Regions and Amazon Web Services accounts targeted by the Automation execution preview operation.
" + }, + "TargetLocationsURL":{ + "shape":"TargetLocationsURL", + "documentation":"A publicly accessible URL for a file that contains the TargetLocations
body. Currently, only files in presigned Amazon S3 buckets are supported.
Information about the optional inputs that can be specified for an automation execution preview.
" + }, "AutomationExecutionLimitExceededException":{ "type":"structure", "members":{ @@ -3630,6 +3732,28 @@ "documentation":"There is no automation execution information for the requested automation execution ID.
", "exception":true }, + "AutomationExecutionPreview":{ + "type":"structure", + "members":{ + "StepPreviews":{ + "shape":"StepPreviewMap", + "documentation":"Information about the type of impact a runbook step would have on a resource.
Mutating
: The runbook step would make changes to the targets through actions that create, modify, or delete resources.
Non_Mutating
: The runbook step would retrieve data about resources but not make changes to them. This category generally includes Describe*
, List*
, Get*
, and similar read-only API actions.
Undetermined
: An undetermined step invokes executions performed by another orchestration service like Lambda, Step Functions, or Amazon Web Services Systems Manager Run Command. An undetermined step might also call a third-party API. Systems Manager Automation doesn't know the outcome of the orchestration processes or third-party API executions, so the results of the steps are undetermined.
Information about the Amazon Web Services Regions targeted by the execution preview.
" + }, + "TargetPreviews":{ + "shape":"TargetPreviewList", + "documentation":"Information that provides a preview of what the impact of running the specified Automation runbook would be.
" + }, + "TotalAccounts":{ + "shape":"Integer", + "documentation":"Information about the Amazon Web Services accounts that were included in the execution preview.
" + } + }, + "documentation":"Information about the results of the execution preview.
" + }, "AutomationExecutionStatus":{ "type":"string", "enum":[ @@ -4999,7 +5123,7 @@ }, "GlobalFilters":{ "shape":"PatchFilterGroup", - "documentation":"A set of global filters used to include patches in the baseline.
" + "documentation":"A set of global filters used to include patches in the baseline.
The GlobalFilters
parameter can be configured only by using the CLI or an Amazon Web Services SDK. It can't be configured from the Patch Manager console, and its value isn't displayed in the console.
Information about the optional inputs that can be specified for an automation execution preview.
" + } + }, + "documentation":"Information about the inputs for an execution preview.
", + "union":true + }, "ExecutionMode":{ "type":"string", "enum":[ @@ -7420,6 +7555,32 @@ "Interactive" ] }, + "ExecutionPreview":{ + "type":"structure", + "members":{ + "Automation":{ + "shape":"AutomationExecutionPreview", + "documentation":"Information about the changes that would be made if an Automation workflow were run.
" + } + }, + "documentation":"Information about the changes that would be made if an execution were run.
", + "union":true + }, + "ExecutionPreviewId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" + }, + "ExecutionPreviewStatus":{ + "type":"string", + "enum":[ + "Pending", + "InProgress", + "Success", + "Failed" + ] + }, "ExecutionRoleName":{ "type":"string", "max":64, @@ -7800,6 +7961,38 @@ } } }, + "GetExecutionPreviewRequest":{ + "type":"structure", + "required":["ExecutionPreviewId"], + "members":{ + "ExecutionPreviewId":{ + "shape":"ExecutionPreviewId", + "documentation":"The ID of the existing execution preview.
" + } + } + }, + "GetExecutionPreviewResponse":{ + "type":"structure", + "members":{ + "ExecutionPreviewId":{ + "shape":"ExecutionPreviewId", + "documentation":"The generated ID for the existing execution preview.
" + }, + "EndedAt":{ + "shape":"DateTime", + "documentation":"A UTC timestamp indicating when the execution preview operation ended.
" + }, + "Status":{ + "shape":"ExecutionPreviewStatus", + "documentation":"The current status of the execution preview operation.
" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"Supplemental information about the current status of the execution preview.
" + }, + "ExecutionPreview":{"shape":"ExecutionPreview"} + } + }, "GetInventoryRequest":{ "type":"structure", "members":{ @@ -8656,7 +8849,7 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"The ID of the service setting to get. The setting ID can be one of the following.
/ssm/managed-instance/default-ec2-instance-management-role
/ssm/automation/customer-script-log-destination
/ssm/automation/customer-script-log-group-name
/ssm/documents/console/public-sharing-permission
/ssm/managed-instance/activation-tier
/ssm/opsinsights/opscenter
/ssm/parameter-store/default-parameter-tier
/ssm/parameter-store/high-throughput-enabled
The ID of the service setting to get. The setting ID can be one of the following.
/ssm/appmanager/appmanager-enabled
/ssm/automation/customer-script-log-destination
/ssm/automation/customer-script-log-group-name
/ssm/automation/enable-adaptive-concurrency
/ssm/documents/console/public-sharing-permission
/ssm/managed-instance/activation-tier
/ssm/managed-instance/default-ec2-instance-management-role
/ssm/opsinsights/opscenter
/ssm/parameter-store/default-parameter-tier
/ssm/parameter-store/high-throughput-enabled
The request body of the GetServiceSetting API operation.
" @@ -8718,6 +8911,14 @@ "documentation":"Error returned when an idempotent operation is retried and the parameters don't match the original call to the API with the same idempotency token.
", "exception":true }, + "ImpactType":{ + "type":"string", + "enum":[ + "Mutating", + "NonMutating", + "Undetermined" + ] + }, "IncompatiblePolicyException":{ "type":"structure", "members":{ @@ -8871,6 +9072,52 @@ "max":50, "min":0 }, + "InstanceInfo":{ + "type":"structure", + "members":{ + "AgentType":{ + "shape":"AgentType", + "documentation":"The type of agent installed on the node.
" + }, + "AgentVersion":{ + "shape":"AgentVersion", + "documentation":"The version number of the agent installed on the node.
" + }, + "ComputerName":{ + "shape":"ComputerName", + "documentation":"The fully qualified host name of the managed node.
" + }, + "InstanceStatus":{ + "shape":"InstanceStatus", + "documentation":"The current status of the managed node.
" + }, + "IpAddress":{ + "shape":"IpAddress", + "documentation":"The IP address of the managed node.
" + }, + "ManagedStatus":{ + "shape":"ManagedStatus", + "documentation":"Indicates whether the node is managed by Systems Manager.
" + }, + "PlatformType":{ + "shape":"PlatformType", + "documentation":"The operating system platform type of the managed node.
" + }, + "PlatformName":{ + "shape":"PlatformName", + "documentation":"The name of the operating system platform running on your managed node.
" + }, + "PlatformVersion":{ + "shape":"PlatformVersion", + "documentation":"The version of the OS platform running on your managed node.
" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"The type of instance, either an EC2 instance or another supported machine type in a hybrid fleet.
" + } + }, + "documentation":"Details about a specific managed node.
" + }, "InstanceInformation":{ "type":"structure", "members":{ @@ -8898,7 +9145,7 @@ }, "PlatformType":{ "shape":"PlatformType", - "documentation":"The operating system platform type.
" + "documentation":"The operating system platform type.
" }, "PlatformName":{ "shape":"String", @@ -9270,7 +9517,7 @@ }, "PlatformType":{ "shape":"PlatformType", - "documentation":"The operating system platform type of the managed node. For example, Windows.
" + "documentation":"The operating system platform type of the managed node. For example, Windows Server or Amazon Linux 2.
" }, "PlatformName":{ "shape":"PlatformName", @@ -9426,6 +9673,11 @@ "type":"string", "max":120 }, + "InstanceStatus":{ + "type":"string", + "max":255, + "min":1 + }, "InstanceTagName":{ "type":"string", "max":255 @@ -9458,7 +9710,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"The activation ID isn't valid. Verify the you entered the correct ActivationId or ActivationCode and try again.
", + "documentation":"The activation ID isn't valid. Verify that you entered the correct ActivationId or ActivationCode and try again.
", "exception":true }, "InvalidAggregatorException":{ @@ -9466,7 +9718,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"The specified aggregator isn't valid for inventory groups. Verify that the aggregator uses a valid inventory type such as AWS:Application
or AWS:InstanceInformation
.
The specified aggregator isn't valid for the group type. Verify that the aggregator you provided is supported.
", "exception":true }, "InvalidAllowedPatternException":{ @@ -9602,7 +9854,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"The filter name isn't valid. Verify the you entered the correct name and try again.
", + "documentation":"The filter name isn't valid. Verify that you entered the correct name and try again.
", "exception":true }, "InvalidFilterKey":{ @@ -10269,6 +10521,12 @@ "type":"string", "max":2500 }, + "IpAddress":{ + "type":"string", + "max":46, + "min":1, + "sensitive":true + }, "IsSubTypeSchema":{"type":"boolean"}, "ItemContentMismatchException":{ "type":"structure", @@ -10738,6 +10996,81 @@ } } }, + "ListNodesRequest":{ + "type":"structure", + "members":{ + "SyncName":{ + "shape":"ResourceDataSyncName", + "documentation":"The name of the resource data sync to retrieve information about. Required for cross-account/cross-Region configurations. Optional for single account/single-Region configurations.
" + }, + "Filters":{ + "shape":"NodeFilterList", + "documentation":"One or more filters. Use a filter to return a more specific list of managed nodes.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"The token for the next set of items to return. (You received this token from a previous call.)
" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.
", + "box":true + } + } + }, + "ListNodesResult":{ + "type":"structure", + "members":{ + "Nodes":{ + "shape":"NodeList", + "documentation":"A list of managed nodes that match the specified filter criteria.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
" + } + } + }, + "ListNodesSummaryRequest":{ + "type":"structure", + "required":["Aggregators"], + "members":{ + "SyncName":{ + "shape":"ResourceDataSyncName", + "documentation":"The name of the resource data sync to retrieve information about. Required for cross-account/cross-Region configuration. Optional for single account/single-Region configurations.
" + }, + "Filters":{ + "shape":"NodeFilterList", + "documentation":"One or more filters. Use a filter to generate a summary that matches your specified filter criteria.
" + }, + "Aggregators":{ + "shape":"NodeAggregatorList", + "documentation":"Specify one or more aggregators to return a count of managed nodes that match that expression. For example, a count of managed nodes by operating system.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"The token for the next set of items to return. (You received this token from a previous call.) The call also returns a token that you can specify in a subsequent call to get the next set of results.
" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.
", + "box":true + } + } + }, + "ListNodesSummaryResult":{ + "type":"structure", + "members":{ + "Summary":{ + "shape":"NodeSummaryList", + "documentation":"A collection of objects reporting information about your managed nodes, such as the count of nodes by operating system.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.
" + } + } + }, "ListOpsItemEventsRequest":{ "type":"structure", "members":{ @@ -11648,6 +11981,14 @@ "min":20, "pattern":"(^mi-[0-9a-f]{17}$)|(^eks_c:[0-9A-Za-z][A-Za-z0-9\\-_]{0,99}_\\w{17}$)" }, + "ManagedStatus":{ + "type":"string", + "enum":[ + "All", + "Managed", + "Unmanaged" + ] + }, "MaxConcurrency":{ "type":"string", "max":7, @@ -11753,6 +12094,225 @@ } }, "NextToken":{"type":"string"}, + "Node":{ + "type":"structure", + "members":{ + "CaptureTime":{ + "shape":"NodeCaptureTime", + "documentation":"The UTC timestamp for when the managed node data was last captured.
" + }, + "Id":{ + "shape":"NodeId", + "documentation":"The ID of the managed node.
" + }, + "Owner":{ + "shape":"NodeOwnerInfo", + "documentation":"Information about the ownership of the managed node.
" + }, + "Region":{ + "shape":"NodeRegion", + "documentation":"The Amazon Web Services Region that a managed node was created in or assigned to.
" + }, + "NodeType":{ + "shape":"NodeType", + "documentation":"Information about the type of node.
" + } + }, + "documentation":"Details about an individual managed node.
" + }, + "NodeAccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, + "NodeAggregator":{ + "type":"structure", + "required":[ + "AggregatorType", + "TypeName", + "AttributeName" + ], + "members":{ + "AggregatorType":{ + "shape":"NodeAggregatorType", + "documentation":"The aggregator type for limiting a node summary. Currently, only Count
is supported.
" + }, + "TypeName":{ + "shape":"NodeTypeName", + "documentation":"The data type name to use for viewing counts of nodes. Currently, only Instance
is supported.
" + }, + "AttributeName":{ + "shape":"NodeAttributeName", + "documentation":"The name of a node attribute on which to limit the count of nodes.
" + }, + "Aggregators":{ + "shape":"NodeAggregatorList", + "documentation":"Information about aggregators used to refine a node summary.
" + } + }, + "documentation":"One or more aggregators for viewing counts of nodes using different dimensions.
" + }, + "NodeAggregatorList":{ + "type":"list", + "member":{"shape":"NodeAggregator"}, + "max":2, + "min":1 + }, + "NodeAggregatorType":{ + "type":"string", + "enum":["Count"] + }, + "NodeAttributeName":{ + "type":"string", + "enum":[ + "AgentVersion", + "PlatformName", + "PlatformType", + "PlatformVersion", + "Region", + "ResourceType" + ] + }, + "NodeCaptureTime":{"type":"timestamp"}, + "NodeFilter":{ + "type":"structure", + "required":[ + "Key", + "Values" + ], + "members":{ + "Key":{ + "shape":"NodeFilterKey", + "documentation":"The name of the filter.
" + }, + "Values":{ + "shape":"NodeFilterValueList", + "documentation":"A filter value supported by the specified key. For example, for the key PlatformType
, supported values include Linux
and Windows
.
" + }, + "Type":{ + "shape":"NodeFilterOperatorType", + "documentation":"The type of filter operator.
" + } + }, + "documentation":"The filters for the operation.
" + }, + "NodeFilterKey":{ + "type":"string", + "enum":[ + "AgentType", + "AgentVersion", + "ComputerName", + "InstanceId", + "InstanceStatus", + "IpAddress", + "ManagedStatus", + "PlatformName", + "PlatformType", + "PlatformVersion", + "ResourceType", + "OrganizationalUnitId", + "OrganizationalUnitPath", + "Region", + "AccountId" + ] + }, + "NodeFilterList":{ + "type":"list", + "member":{"shape":"NodeFilter"}, + "max":5, + "min":1 + }, + "NodeFilterOperatorType":{ + "type":"string", + "enum":[ + "Equal", + "NotEqual", + "BeginWith" + ] + }, + "NodeFilterValue":{ + "type":"string", + "max":512, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + }, + "NodeFilterValueList":{ + "type":"list", + "member":{"shape":"NodeFilterValue"}, + "max":5, + "min":1 + }, + "NodeId":{ + "type":"string", + "pattern":"(^i-(\\w{8}|\\w{17})$)|(^mi-\\w{17}$)" + }, + "NodeList":{ + "type":"list", + "member":{"shape":"Node"}, + "max":50, + "min":0 + }, + "NodeOrganizationalUnitId":{ + "type":"string", + "max":68, + "min":1, + "pattern":"^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$" + }, + "NodeOrganizationalUnitPath":{ + "type":"string", + "max":512, + "min":1 + }, + "NodeOwnerInfo":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"NodeAccountId", + "documentation":"The ID of the Amazon Web Services account that owns the managed node.
" + }, + "OrganizationalUnitId":{ + "shape":"NodeOrganizationalUnitId", + "documentation":"The ID of the organization unit (OU) that the account is part of.
" + }, + "OrganizationalUnitPath":{ + "shape":"NodeOrganizationalUnitPath", + "documentation":"The path for the organizational unit (OU) that owns the managed node. The path for the OU is built using the IDs of the organization, root, and all OUs in the path down to and including the OU. For example:
o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-ghi0-awsccccc/ou-jkl0-awsddddd/
" + } + }, + "documentation":"Information about ownership of a managed node.
" + }, + "NodeRegion":{ + "type":"string", + "max":64, + "min":1 + }, + "NodeSummary":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"}, + "max":3, + "min":0 + }, + "NodeSummaryList":{ + "type":"list", + "member":{"shape":"NodeSummary"}, + "max":50, + "min":0 + }, + "NodeType":{ + "type":"structure", + "members":{ + "Instance":{ + "shape":"InstanceInfo", + "documentation":"Information about a specific managed node.
" + } + }, + "documentation":"Information about a managed node's type.
", + "union":true + }, + "NodeTypeName":{ + "type":"string", + "enum":["Instance"] + }, "NonCompliantSummary":{ "type":"structure", "members":{ @@ -13035,7 +13595,7 @@ }, "ARN":{ "shape":"String", - "documentation":"The (ARN) of the last user to update the parameter.
" + "documentation":"The Amazon Resource Name (ARN) of the parameter.
" }, "Type":{ "shape":"ParameterType", @@ -14099,6 +14659,12 @@ ] }, "Region":{"type":"string"}, + "RegionList":{ + "type":"list", + "member":{"shape":"Region"}, + "max":50, + "min":1 + }, "Regions":{ "type":"list", "member":{"shape":"Region"}, @@ -14389,7 +14955,7 @@ "members":{ "SettingId":{ "shape":"ServiceSettingId", - "documentation":"The Amazon Resource Name (ARN) of the service setting to reset. The setting ID can be one of the following.
/ssm/managed-instance/default-ec2-instance-management-role
/ssm/automation/customer-script-log-destination
/ssm/automation/customer-script-log-group-name
/ssm/documents/console/public-sharing-permission
/ssm/managed-instance/activation-tier
/ssm/opsinsights/opscenter
/ssm/parameter-store/default-parameter-tier
/ssm/parameter-store/high-throughput-enabled
The Amazon Resource Name (ARN) of the service setting to reset. The setting ID can be one of the following.
/ssm/appmanager/appmanager-enabled
/ssm/automation/customer-script-log-destination
/ssm/automation/customer-script-log-group-name
/ssm/automation/enable-adaptive-concurrency
/ssm/documents/console/public-sharing-permission
/ssm/managed-instance/activation-tier
/ssm/managed-instance/default-ec2-instance-management-role
/ssm/opsinsights/opscenter
/ssm/parameter-store/default-parameter-tier
/ssm/parameter-store/high-throughput-enabled
The request body of the ResetServiceSetting API operation.
" @@ -15641,6 +16207,33 @@ } } }, + "StartExecutionPreviewRequest":{ + "type":"structure", + "required":["DocumentName"], + "members":{ + "DocumentName":{ + "shape":"DocumentName", + "documentation":"The name of the Automation runbook to run. The result of the execution preview indicates what the impact would be of running this runbook.
" + }, + "DocumentVersion":{ + "shape":"DocumentVersion", + "documentation":"The version of the Automation runbook to run. The default value is $DEFAULT
.
" + }, + "ExecutionInputs":{ + "shape":"ExecutionInputs", + "documentation":"Information about the inputs that can be specified for the preview operation.
" + } + } + }, + "StartExecutionPreviewResponse":{ + "type":"structure", + "members":{ + "ExecutionPreviewId":{ + "shape":"ExecutionPreviewId", + "documentation":"The ID of the execution preview generated by the system.
" + } + } + }, "StartSessionRequest":{ "type":"structure", "required":["Target"], @@ -15864,6 +16457,11 @@ "type":"list", "member":{"shape":"StepExecution"} }, + "StepPreviewMap":{ + "type":"map", + "key":{"shape":"ImpactType"}, + "value":{"shape":"Integer"} + }, "StopAutomationExecutionRequest":{ "type":"structure", "required":["AutomationExecutionId"], @@ -16076,6 +16674,26 @@ "type":"list", "member":{"shape":"ParameterValue"} }, + "TargetPreview":{ + "type":"structure", + "members":{ + "Count":{ + "shape":"Integer", + "documentation":"The number of resources of a certain type included in an execution preview.
" + }, + "TargetType":{ + "shape":"String", + "documentation":"A type of resource that was included in the execution preview.
" + } + }, + "documentation":"Information about the resources that would be included in the actual runbook execution, if it were to be run.
" + }, + "TargetPreviewList":{ + "type":"list", + "member":{"shape":"TargetPreview"}, + "max":50, + "min":0 + }, "TargetType":{ "type":"string", "max":200, @@ -16228,6 +16846,14 @@ "documentation":"The operating systems you specified isn't supported, or the operation isn't supported for the operating system.
", "exception":true }, + "UnsupportedOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"This operation is not supported for the current account. You must first enable the Systems Manager integrated experience in your account.
", + "exception":true + }, "UnsupportedParameterType":{ "type":"structure", "members":{ @@ -16941,7 +17567,7 @@ }, "GlobalFilters":{ "shape":"PatchFilterGroup", - "documentation":"A set of global filters used to include patches in the baseline.
" + "documentation":"A set of global filters used to include patches in the baseline.
The GlobalFilters
parameter can be configured only by using the CLI or an Amazon Web Services SDK. It can't be configured from the Patch Manager console, and its value isn't displayed in the console.
The Amazon Resource Name (ARN) of the service setting to update. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled
. The setting ID can be one of the following.
/ssm/managed-instance/default-ec2-instance-management-role
/ssm/automation/customer-script-log-destination
/ssm/automation/customer-script-log-group-name
/ssm/documents/console/public-sharing-permission
/ssm/managed-instance/activation-tier
/ssm/opsinsights/opscenter
/ssm/parameter-store/default-parameter-tier
/ssm/parameter-store/high-throughput-enabled
Permissions to update the /ssm/managed-instance/default-ec2-instance-management-role
setting should only be provided to administrators. Implement least privilege access when allowing individuals to configure or modify the Default Host Management Configuration.
The Amazon Resource Name (ARN) of the service setting to update. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled
. The setting ID can be one of the following.
/ssm/appmanager/appmanager-enabled
/ssm/automation/customer-script-log-destination
/ssm/automation/customer-script-log-group-name
/ssm/automation/enable-adaptive-concurrency
/ssm/documents/console/public-sharing-permission
/ssm/managed-instance/activation-tier
/ssm/managed-instance/default-ec2-instance-management-role
/ssm/opsinsights/opscenter
/ssm/parameter-store/default-parameter-tier
/ssm/parameter-store/high-throughput-enabled
Permissions to update the /ssm/managed-instance/default-ec2-instance-management-role
setting should only be provided to administrators. Implement least privilege access when allowing individuals to configure or modify the Default Host Management Configuration.
The new value to specify for the service setting. The following list specifies the available values for each setting.
For /ssm/managed-instance/default-ec2-instance-management-role
, enter the name of an IAM role.
For /ssm/automation/customer-script-log-destination
, enter CloudWatch
.
For /ssm/automation/customer-script-log-group-name
, enter the name of an Amazon CloudWatch Logs log group.
For /ssm/documents/console/public-sharing-permission
, enter Enable
or Disable
.
For /ssm/managed-instance/activation-tier
, enter standard
or advanced
.
For /ssm/opsinsights/opscenter
, enter Enabled
or Disabled
.
For /ssm/parameter-store/default-parameter-tier
, enter Standard
, Advanced
, or Intelligent-Tiering
For /ssm/parameter-store/high-throughput-enabled
, enter true
or false
.
The new value to specify for the service setting. The following list specifies the available values for each setting.
For /ssm/appmanager/appmanager-enabled
, enter True
or False
.
For /ssm/automation/customer-script-log-destination
, enter CloudWatch
.
For /ssm/automation/customer-script-log-group-name
, enter the name of an Amazon CloudWatch Logs log group.
For /ssm/documents/console/public-sharing-permission
, enter Enable
or Disable
.
For /ssm/managed-instance/activation-tier
, enter standard
or advanced
.
For /ssm/managed-instance/default-ec2-instance-management-role
, enter the name of an IAM role.
For /ssm/opsinsights/opscenter
, enter Enabled
or Disabled
.
For /ssm/parameter-store/default-parameter-tier
, enter Standard
, Advanced
, or Intelligent-Tiering
For /ssm/parameter-store/high-throughput-enabled
, enter true
or false
.
The request body of the UpdateServiceSetting API operation.
" @@ -17106,6 +17732,18 @@ "type":"list", "member":{"shape":"ValidNextStep"} }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ReasonCode":{ + "shape":"String", + "documentation":"The reason code for the invalid request.
" + } + }, + "documentation":"The request isn't valid. Verify that you entered valid contents for the command and try again.
", + "exception":true + }, "Version":{ "type":"string", "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" diff --git a/botocore/data/xray/2016-04-12/endpoint-rule-set-1.json b/botocore/data/xray/2016-04-12/endpoint-rule-set-1.json index 1d5bbea62c..1ff2186fad 100644 --- a/botocore/data/xray/2016-04-12/endpoint-rule-set-1.json +++ b/botocore/data/xray/2016-04-12/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/xray/2016-04-12/service-2.json b/botocore/data/xray/2016-04-12/service-2.json index 3a77414624..2c77f09d06 100644 --- a/botocore/data/xray/2016-04-12/service-2.json +++ b/botocore/data/xray/2016-04-12/service-2.json @@ -4,10 +4,12 @@ "apiVersion":"2016-04-12", "endpointPrefix":"xray", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS X-Ray", "serviceId":"XRay", "signatureVersion":"v4", - "uid":"xray-2016-04-12" + "uid":"xray-2016-04-12", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchGetTraces":{ @@ -22,7 +24,22 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottledException"} ], - "documentation":"Retrieves a list of traces specified by ID. Each trace is a collection of segment documents that originates from a single request. Use GetTraceSummaries
to get a list of trace IDs.
You cannot find traces through this API if Transaction Search is enabled, since traces are not indexed in X-Ray.
Retrieves a list of traces specified by ID. Each trace is a collection of segment documents that originates from a single request. Use GetTraceSummaries
to get a list of trace IDs.
Cancels an ongoing trace retrieval job initiated by StartTraceRetrieval
using the provided RetrievalToken
. A successful cancellation will return an HTTP 200 response.
Retrieves all active group details.
" }, + "GetIndexingRules":{ + "name":"GetIndexingRules", + "http":{ + "method":"POST", + "requestUri":"/GetIndexingRules" + }, + "input":{"shape":"GetIndexingRulesRequest"}, + "output":{"shape":"GetIndexingRulesResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":"Retrieves all indexing rules.
Indexing rules are used to determine the server-side sampling rate for spans ingested through the CloudWatchLogs destination and indexed by X-Ray. For more information, see Transaction Search.
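A short sketch of reading these rules with botocore (the method name is derived from the operation name by botocore's standard mapping; the Rule union currently exposes only the Probabilistic member described later in this file):

import botocore.session

session = botocore.session.get_session()
xray = session.create_client("xray", region_name="us-east-1")

resp = xray.get_indexing_rules()
for rule in resp.get("IndexingRules", []):
    prob = rule["Rule"]["Probabilistic"]
    print(rule["Name"], prob["DesiredSamplingPercentage"], prob.get("ActualSamplingPercentage"))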
" + }, "GetInsight":{ "name":"GetInsight", "http":{ @@ -194,6 +225,21 @@ ], "documentation":"Retrieves the summaries of all insights in the specified group matching the provided filter values.
" }, + "GetRetrievedTracesGraph":{ + "name":"GetRetrievedTracesGraph", + "http":{ + "method":"POST", + "requestUri":"/GetRetrievedTracesGraph" + }, + "input":{"shape":"GetRetrievedTracesGraphRequest"}, + "output":{"shape":"GetRetrievedTracesGraphResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":" Retrieves a service graph for traces based on the specified RetrievalToken
from the CloudWatch log group generated by Transaction Search. This API does not initiate a retrieval job. You must first execute StartTraceRetrieval
to obtain the required RetrievalToken
.
The trace graph describes services that process incoming requests and any downstream services they call, which may include Amazon Web Services resources, external APIs, or databases.
The response is empty until the RetrievalStatus
is COMPLETE. Retry the request after the status changes from RUNNING or SCHEDULED to COMPLETE to access the full service graph.
When CloudWatch log is the destination, this API can support cross-account observability and service graph retrieval across linked accounts.
For retrieving graphs from X-Ray directly as opposed to the Transaction-Search Log group, see GetTraceGraph.
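A hedged usage sketch, assuming a retrieval_token previously returned by StartTraceRetrieval:

import botocore.session

session = botocore.session.get_session()
xray = session.create_client("xray", region_name="us-east-1")

resp = xray.get_retrieved_traces_graph(RetrievalToken=retrieval_token)
if resp["RetrievalStatus"] == "COMPLETE":
    for svc in resp.get("Services", []):
        # Each entry pairs a service node with its link relationships.
        print(svc["Service"].get("Name"), svc.get("Links", []))
else:
    print("Graph not ready yet:", resp["RetrievalStatus"])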
" + }, "GetSamplingRules":{ "name":"GetSamplingRules", "http":{ @@ -278,6 +324,20 @@ ], "documentation":"Retrieves a service graph for one or more specific trace IDs.
" }, + "GetTraceSegmentDestination":{ + "name":"GetTraceSegmentDestination", + "http":{ + "method":"POST", + "requestUri":"/GetTraceSegmentDestination" + }, + "input":{"shape":"GetTraceSegmentDestinationRequest"}, + "output":{"shape":"GetTraceSegmentDestinationResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":" Retrieves the current destination of data sent to PutTraceSegments
and OpenTelemetry API. The Transaction Search feature requires a CloudWatchLogs destination. For more information, see Transaction Search and OpenTelemetry.
Retrieves IDs and annotations for traces available for a specified time frame using an optional filter. To get the full traces, pass the trace IDs to BatchGetTraces
.
A filter expression can target traced requests that hit specific service nodes or edges, have errors, or come from a known user. For example, the following filter expression targets traces that pass through api.example.com
:
service(\"api.example.com\")
This filter expression finds traces that have an annotation named account
with the value 12345
:
annotation.account = \"12345\"
For a full list of indexed fields and keywords that you can use in filter expressions, see Using Filter Expressions in the Amazon Web Services X-Ray Developer Guide.
" + "documentation":"Retrieves IDs and annotations for traces available for a specified time frame using an optional filter. To get the full traces, pass the trace IDs to BatchGetTraces
.
A filter expression can target traced requests that hit specific service nodes or edges, have errors, or come from a known user. For example, the following filter expression targets traces that pass through api.example.com
:
service(\"api.example.com\")
This filter expression finds traces that have an annotation named account
with the value 12345
:
annotation.account = \"12345\"
For a full list of indexed fields and keywords that you can use in filter expressions, see Use filter expressions in the Amazon Web Services X-Ray Developer Guide.
" }, "ListResourcePolicies":{ "name":"ListResourcePolicies", @@ -306,6 +366,21 @@ ], "documentation":"Returns the list of resource policies in the target Amazon Web Services account.
" }, + "ListRetrievedTraces":{ + "name":"ListRetrievedTraces", + "http":{ + "method":"POST", + "requestUri":"/ListRetrievedTraces" + }, + "input":{"shape":"ListRetrievedTracesRequest"}, + "output":{"shape":"ListRetrievedTracesResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":" Retrieves a list of traces for a given RetrievalToken
from the CloudWatch log group generated by Transaction Search. For information on what each trace returns, see BatchGetTraces.
This API does not initiate a retrieval job. To start a trace retrieval, use StartTraceRetrieval
, which generates the required RetrievalToken
.
When the RetrievalStatus
is not COMPLETE, the API will return an empty response. Retry the request once the retrieval has completed to access the full list of traces.
For cross-account observability, this API can retrieve traces from linked accounts when CloudWatch log is the destination across relevant accounts. For more details, see CloudWatch cross-account observability.
For retrieving data from X-Ray directly as opposed to the Transaction-Search Log group, see BatchGetTraces.
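A minimal sketch of fetching the retrieved traces, again assuming a retrieval_token from StartTraceRetrieval (TraceFormat accepts XRAY or OTEL per the TraceFormatType enum later in this file):

import botocore.session

session = botocore.session.get_session()
xray = session.create_client("xray", region_name="us-east-1")

resp = xray.list_retrieved_traces(RetrievalToken=retrieval_token, TraceFormat="XRAY")
if resp["RetrievalStatus"] != "COMPLETE":
    print("Retrieval still in progress; retry later.")
else:
    for trace in resp.get("Traces", []):
        print(trace["Id"], trace.get("Duration"), len(trace.get("Spans", [])))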
" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -379,7 +454,22 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottledException"} ], - "documentation":"Uploads segment documents to Amazon Web Services X-Ray. The X-Ray SDK generates segment documents and sends them to the X-Ray daemon, which uploads them in batches. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.
Segments must include the following fields. For the full segment document schema, see Amazon Web Services X-Ray Segment Documents in the Amazon Web Services X-Ray Developer Guide.
Required segment document fields
name
- The name of the service that handled the request.
id
- A 64-bit identifier for the segment, unique among segments in the same trace, in 16 hexadecimal digits.
trace_id
- A unique identifier that connects all segments and subsegments originating from a single client request.
start_time
- Time the segment or subsegment was created, in floating point seconds in epoch time, accurate to milliseconds. For example, 1480615200.010
or 1.480615200010E9
.
end_time
- Time the segment or subsegment was closed. For example, 1480615200.090
or 1.480615200090E9
. Specify either an end_time
or in_progress
.
in_progress
- Set to true
instead of specifying an end_time
to record that a segment has been started, but is not complete. Send an in-progress segment when your application receives a request that will take a long time to serve, to trace that the request was received. When the response is sent, send the complete segment to overwrite the in-progress segment.
A trace_id
consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. This includes:
Trace ID Format
The version number, for instance, 1
.
The time of the original request, in Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200
seconds, or 58406520
in hexadecimal.
A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.
Uploads segment documents to Amazon Web Services X-Ray. A segment document can be a completed segment, an in-progress segment, or an array of subsegments.
Segments must include the following fields. For the full segment document schema, see Amazon Web Services X-Ray Segment Documents in the Amazon Web Services X-Ray Developer Guide.
Required segment document fields
name
- The name of the service that handled the request.
id
- A 64-bit identifier for the segment, unique among segments in the same trace, in 16 hexadecimal digits.
trace_id
- A unique identifier that connects all segments and subsegments originating from a single client request.
start_time
- Time the segment or subsegment was created, in floating point seconds in epoch time, accurate to milliseconds. For example, 1480615200.010
or 1.480615200010E9
.
end_time
- Time the segment or subsegment was closed. For example, 1480615200.090
or 1.480615200090E9
. Specify either an end_time
or in_progress
.
in_progress
- Set to true
instead of specifying an end_time
to record that a segment has been started, but is not complete. Send an in-progress segment when your application receives a request that will take a long time to serve, to trace that the request was received. When the response is sent, send the complete segment to overwrite the in-progress segment.
A trace_id
consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. For trace IDs created by an X-Ray SDK, or by Amazon Web Services services integrated with X-Ray, a trace ID includes:
Trace ID Format
The version number, for instance, 1
.
The time of the original request, in Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200
seconds, or 58406520
in hexadecimal.
A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.
Trace IDs created via OpenTelemetry have a different format based on the W3C Trace Context specification. A W3C trace ID must be formatted in the X-Ray trace ID format when sending to X-Ray. For example, a W3C trace ID 4efaaf4d1e8720b39541901950019ee5
should be formatted as 1-4efaaf4d-1e8720b39541901950019ee5
when sending to X-Ray. While X-Ray trace IDs include the original request timestamp in Unix epoch time, this is not required or validated.
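The W3C-to-X-Ray conversion described above is pure string formatting; a small helper makes it concrete:

def w3c_to_xray_trace_id(w3c_trace_id: str) -> str:
    # Reformat a 32-hex-digit W3C trace ID into X-Ray's "1-xxxxxxxx-..." form,
    # e.g. "4efaaf4d1e8720b39541901950019ee5" -> "1-4efaaf4d-1e8720b39541901950019ee5".
    return "1-{}-{}".format(w3c_trace_id[:8], w3c_trace_id[8:])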
Initiates a trace retrieval process using the specified time range and the given trace IDs from the CloudWatch log group generated by Transaction Search. For more information, see Transaction Search.
The API returns a RetrievalToken
, which can be used with ListRetrievedTraces
or GetRetrievedTracesGraph
to fetch results. Retrievals will time out after 60 minutes. To execute long time ranges, consider segmenting into multiple retrievals.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to retrieve data from a linked source account, as long as both accounts have transaction search enabled.
For retrieving data from X-Ray directly as opposed to the Transaction-Search Log group, see BatchGetTraces.
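A sketch of initiating a retrieval; the trace ID reuses the documentation's own example, and the epoch-second timestamps are illustrative:

import botocore.session

session = botocore.session.get_session()
xray = session.create_client("xray", region_name="us-east-1")

token = xray.start_trace_retrieval(
    TraceIds=["1-58406520-a006649127e371903a2de979"],
    StartTime=1732200000,  # inclusive range start, epoch seconds (illustrative)
    EndTime=1732203600,    # inclusive range end (illustrative)
)["RetrievalToken"]

# The token feeds ListRetrievedTraces / GetRetrievedTracesGraph; an in-flight
# job can be abandoned with: xray.cancel_trace_retrieval(RetrievalToken=token)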
" }, "TagResource":{ "name":"TagResource", @@ -426,6 +516,21 @@ ], "documentation":"Updates a group resource.
" }, + "UpdateIndexingRule":{ + "name":"UpdateIndexingRule", + "http":{ + "method":"POST", + "requestUri":"/UpdateIndexingRule" + }, + "input":{"shape":"UpdateIndexingRuleRequest"}, + "output":{"shape":"UpdateIndexingRuleResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Modifies an indexing rule’s configuration.
Indexing rules are used for determining the sampling rate for spans indexed from CloudWatch Logs. For more information, see Transaction Search.
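A hedged sketch of adjusting the sampling percentage (the rule name here is an assumption; GetIndexingRules returns the actual names):

import botocore.session

session = botocore.session.get_session()
xray = session.create_client("xray", region_name="us-east-1")

resp = xray.update_indexing_rule(
    Name="Default",  # assumed rule name
    Rule={"Probabilistic": {"DesiredSamplingPercentage": 5.0}},
)
print(resp["IndexingRule"])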
" + }, "UpdateSamplingRule":{ "name":"UpdateSamplingRule", "http":{ @@ -439,6 +544,20 @@ {"shape":"ThrottledException"} ], "documentation":"Modifies a sampling rule's configuration.
" + }, + "UpdateTraceSegmentDestination":{ + "name":"UpdateTraceSegmentDestination", + "http":{ + "method":"POST", + "requestUri":"/UpdateTraceSegmentDestination" + }, + "input":{"shape":"UpdateTraceSegmentDestinationRequest"}, + "output":{"shape":"UpdateTraceSegmentDestinationResult"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottledException"} + ], + "documentation":" Modifies the destination of data sent to PutTraceSegments
. The Transaction Search feature requires the CloudWatchLogs destination. For more information, see Transaction Search.
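A minimal sketch of switching the destination and reading it back (Destination values come from the TraceSegmentDestination enum later in this file; the status is expected to move from PENDING to ACTIVE):

import botocore.session

session = botocore.session.get_session()
xray = session.create_client("xray", region_name="us-east-1")

resp = xray.update_trace_segment_destination(Destination="CloudWatchLogs")
print(resp["Destination"], resp["Status"])

# The current destination can be read back at any time:
print(xray.get_trace_segment_destination())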
" + }, + "CancelTraceRetrievalRequest":{ + "type":"structure", + "required":["RetrievalToken"], + "members":{ + "RetrievalToken":{ + "shape":"RetrievalToken", + "documentation":"The retrieval token of the trace retrieval job to cancel, as returned by StartTraceRetrieval.
" + } + } + }, + "CancelTraceRetrievalResult":{ + "type":"structure", + "members":{ + } + }, "ClientID":{ "type":"string", "max":24, @@ -913,7 +1047,7 @@ "members":{ "ThrottleCount":{ "shape":"NullableLong", - "documentation":"The number of requests that failed with a 419 throttling status code.
" + "documentation":"The number of requests that failed with a 429 throttling status code.
" }, "OtherCount":{ "shape":"NullableLong", @@ -1098,6 +1232,28 @@ } } }, + "GetIndexingRulesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"Specify the pagination token returned by a previous request to retrieve the next page of indexes.
" + } + } + }, + "GetIndexingRulesResult":{ + "type":"structure", + "members":{ + "IndexingRules":{ + "shape":"IndexingRuleList", + "documentation":"Retrieves all indexing rules.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"Specify the pagination token returned by a previous request to retrieve the next page of indexes.
" + } + } + }, "GetInsightEventsMaxResults":{ "type":"integer", "max":50, @@ -1267,6 +1423,37 @@ } } }, + "GetRetrievedTracesGraphRequest":{ + "type":"structure", + "required":["RetrievalToken"], + "members":{ + "RetrievalToken":{ + "shape":"RetrievalToken", + "documentation":"Retrieval token.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"Specify the pagination token returned by a previous request to retrieve the next page of indexes.
" + } + } + }, + "GetRetrievedTracesGraphResult":{ + "type":"structure", + "members":{ + "RetrievalStatus":{ + "shape":"RetrievalStatus", + "documentation":"Status of the retrieval.
" + }, + "Services":{ + "shape":"RetrievedServicesList", + "documentation":"Retrieved services.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"Specify the pagination token returned by a previous request to retrieve the next page of indexes.
" + } + } + }, "GetSamplingRulesRequest":{ "type":"structure", "members":{ @@ -1477,6 +1664,24 @@ } } }, + "GetTraceSegmentDestinationRequest":{ + "type":"structure", + "members":{ + } + }, + "GetTraceSegmentDestinationResult":{ + "type":"structure", + "members":{ + "Destination":{ + "shape":"TraceSegmentDestination", + "documentation":"Retrieves the current destination.
" + }, + "Status":{ + "shape":"TraceSegmentDestinationStatus", + "documentation":"Status of the retrieval.
" + } + } + }, "GetTraceSummariesRequest":{ "type":"structure", "required":[ @@ -1494,7 +1699,7 @@ }, "TimeRangeType":{ "shape":"TimeRangeType", - "documentation":"A parameter to indicate whether to query trace summaries by TraceId, Event (trace update time), or Service (segment end time).
" + "documentation":"Query trace summaries by TraceId (trace start time), Event (trace update time), or Service (trace segment end time).
" }, "Sampling":{ "shape":"NullableBoolean", @@ -1535,6 +1740,24 @@ } } }, + "GraphLink":{ + "type":"structure", + "members":{ + "ReferenceType":{ + "shape":"String", + "documentation":"Relationship of a trace to the corresponding service.
" + }, + "SourceTraceId":{ + "shape":"String", + "documentation":"Source trace of a link relationship.
" + }, + "DestinationTraceIds":{ + "shape":"TraceIdList", + "documentation":"Destination traces of a link relationship.
" + } + }, + "documentation":"The relation between two services.
" + }, "Group":{ "type":"structure", "members":{ @@ -1649,6 +1872,52 @@ }, "documentation":"Information about an HTTP request.
" }, + "IndexingRule":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"RuleName", + "documentation":"The name of the indexing rule.
" + }, + "ModifiedAt":{ + "shape":"Timestamp", + "documentation":"Displays when the rule was last modified, in Unix time seconds.
" + }, + "Rule":{ + "shape":"IndexingRuleValue", + "documentation":"The indexing rule.
" + } + }, + "documentation":"Rule used to determine the server-side sampling rate for spans ingested through the CloudWatchLogs destination and indexed by X-Ray.
" + }, + "IndexingRuleList":{ + "type":"list", + "member":{"shape":"IndexingRule"}, + "max":10, + "min":1 + }, + "IndexingRuleValue":{ + "type":"structure", + "members":{ + "Probabilistic":{ + "shape":"ProbabilisticRuleValue", + "documentation":"Indexing rule configuration that is used to probabilistically sample traceIds.
" + } + }, + "documentation":"The indexing rule configuration.
", + "union":true + }, + "IndexingRuleValueUpdate":{ + "type":"structure", + "members":{ + "Probabilistic":{ + "shape":"ProbabilisticRuleValueUpdate", + "documentation":"Indexing rule configuration that is used to probabilistically sample traceIds.
" + } + }, + "documentation":"Update to an indexing rule.
", + "union":true + }, "Insight":{ "type":"structure", "members":{ @@ -1905,6 +2174,12 @@ "documentation":"The request is missing required parameters or has invalid parameters.
", "exception":true }, + "LinksList":{ + "type":"list", + "member":{"shape":"GraphLink"}, + "max":100, + "min":0 + }, "ListResourcePoliciesRequest":{ "type":"structure", "members":{ @@ -1927,6 +2202,45 @@ } } }, + "ListRetrievedTracesRequest":{ + "type":"structure", + "required":["RetrievalToken"], + "members":{ + "RetrievalToken":{ + "shape":"RetrievalToken", + "documentation":"Retrieval token.
" + }, + "TraceFormat":{ + "shape":"TraceFormatType", + "documentation":"Format of the requested traces.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"Specify the pagination token returned by a previous request to retrieve the next page of indexes.
" + } + } + }, + "ListRetrievedTracesResult":{ + "type":"structure", + "members":{ + "RetrievalStatus":{ + "shape":"RetrievalStatus", + "documentation":"Status of the retrieval.
" + }, + "TraceFormat":{ + "shape":"TraceFormatType", + "documentation":"Format of the requested traces.
" + }, + "Traces":{ + "shape":"TraceSpanList", + "documentation":"Full traces for the specified requests.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"Specify the pagination token returned by a previous request to retrieve the next page of indexes.
" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceARN"], @@ -2007,6 +2321,32 @@ "max":9999, "min":1 }, + "ProbabilisticRuleValue":{ + "type":"structure", + "required":["DesiredSamplingPercentage"], + "members":{ + "DesiredSamplingPercentage":{ + "shape":"NullableDouble", + "documentation":"Configured sampling percentage of traceIds. Note that sampling can be subject to limits to ensure completeness of data.
" + }, + "ActualSamplingPercentage":{ + "shape":"NullableDouble", + "documentation":"Applied sampling percentage of traceIds.
" + } + }, + "documentation":"The indexing rule configuration for probabilistic sampling.
" + }, + "ProbabilisticRuleValueUpdate":{ + "type":"structure", + "required":["DesiredSamplingPercentage"], + "members":{ + "DesiredSamplingPercentage":{ + "shape":"NullableDouble", + "documentation":"Configured sampling percentage of traceIds. Note that sampling can be subject to limits to ensure completeness of data.
" + } + }, + "documentation":"Update to the indexing rule configuration for probabilistic sampling.
" + }, "PutEncryptionConfigRequest":{ "type":"structure", "required":["Type"], @@ -2265,6 +2605,57 @@ "type":"list", "member":{"shape":"ResponseTimeRootCause"} }, + "RetrievalStatus":{ + "type":"string", + "enum":[ + "SCHEDULED", + "RUNNING", + "COMPLETE", + "FAILED", + "CANCELLED", + "TIMEOUT" + ] + }, + "RetrievalToken":{ + "type":"string", + "max":1020, + "min":0 + }, + "RetrievedService":{ + "type":"structure", + "members":{ + "Service":{"shape":"Service"}, + "Links":{ + "shape":"LinksList", + "documentation":"Relation between two 2 services.
" + } + }, + "documentation":"Retrieved information about an application that processed requests, users that made requests, or downstream services, resources, and applications that an application used.
" + }, + "RetrievedServicesList":{ + "type":"list", + "member":{"shape":"RetrievedService"}, + "max":1000, + "min":0 + }, + "RetrievedTrace":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"TraceId", + "documentation":"The unique identifier for the span.
" + }, + "Duration":{ + "shape":"NullableDouble", + "documentation":"The length of time in seconds between the start time of the root span and the end time of the last span that completed.
" + }, + "Spans":{ + "shape":"SpanList", + "documentation":"Spans that comprise the trace.
" + } + }, + "documentation":"Retrieved collection of spans with matching trace IDs.
" + }, "RootCauseException":{ "type":"structure", "members":{ @@ -2581,7 +2972,7 @@ "documentation":"The segment document.
" } }, - "documentation":"A segment from a trace that has been ingested by the X-Ray service. The segment can be compiled from documents uploaded with PutTraceSegments, or an inferred
segment for a downstream service, generated from a subsegment sent by the service that called it.
For the full segment document schema, see Amazon Web Services X-Ray Segment Documents in the Amazon Web Services X-Ray Developer Guide.
" + "documentation":"A segment from a trace that has been ingested by the X-Ray service. The segment can be compiled from documents uploaded with PutTraceSegments, or an inferred
segment for a downstream service, generated from a subsegment sent by the service that called it.
For the full segment document schema, see Amazon Web Services X-Ray segment documents in the Amazon Web Services X-Ray Developer Guide.
" }, "SegmentDocument":{ "type":"string", @@ -2718,6 +3109,66 @@ "type":"string", "max":64 }, + "Span":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"SpanId", + "documentation":"The span ID.
" + }, + "Document":{ + "shape":"SpanDocument", + "documentation":"The span document.
" + } + }, + "documentation":"A span from a trace that has been ingested by the X-Ray service. A span represents a unit of work or an operation performed by a service.
" + }, + "SpanDocument":{ + "type":"string", + "max":204800 + }, + "SpanId":{ + "type":"string", + "max":16, + "min":1 + }, + "SpanList":{ + "type":"list", + "member":{"shape":"Span"}, + "max":100, + "min":0 + }, + "StartTraceRetrievalRequest":{ + "type":"structure", + "required":[ + "TraceIds", + "StartTime", + "EndTime" + ], + "members":{ + "TraceIds":{ + "shape":"TraceIdListForRetrieval", + "documentation":"Specify the trace IDs of the traces to be retrieved.
" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"The start of the time range to retrieve traces. The range is inclusive, so the specified start time is included in the query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.
" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"The end of the time range to retrieve traces. The range is inclusive, so the specified end time is included in the query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.
" + } + } + }, + "StartTraceRetrievalResult":{ + "type":"structure", + "members":{ + "RetrievalToken":{ + "shape":"RetrievalToken", + "documentation":"Retrieval token.
" + } + } + }, "String":{"type":"string"}, "Tag":{ "type":"structure", @@ -2899,6 +3350,13 @@ "type":"list", "member":{"shape":"AvailabilityZoneDetail"} }, + "TraceFormatType":{ + "type":"string", + "enum":[ + "XRAY", + "OTEL" + ] + }, "TraceId":{ "type":"string", "max":35, @@ -2908,6 +3366,12 @@ "type":"list", "member":{"shape":"TraceId"} }, + "TraceIdListForRetrieval":{ + "type":"list", + "member":{"shape":"TraceId"}, + "max":100, + "min":0 + }, "TraceInstanceIds":{ "type":"list", "member":{"shape":"InstanceIdDetail"} @@ -2920,11 +3384,31 @@ "type":"list", "member":{"shape":"ResourceARNDetail"} }, + "TraceSegmentDestination":{ + "type":"string", + "enum":[ + "XRay", + "CloudWatchLogs" + ] + }, + "TraceSegmentDestinationStatus":{ + "type":"string", + "enum":[ + "PENDING", + "ACTIVE" + ] + }, "TraceSegmentDocument":{"type":"string"}, "TraceSegmentDocumentList":{ "type":"list", "member":{"shape":"TraceSegmentDocument"} }, + "TraceSpanList":{ + "type":"list", + "member":{"shape":"RetrievedTrace"}, + "max":5, + "min":0 + }, "TraceSummary":{ "type":"structure", "members":{ @@ -3141,6 +3625,32 @@ } } }, + "UpdateIndexingRuleRequest":{ + "type":"structure", + "required":[ + "Name", + "Rule" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"Name of the indexing rule to be updated.
" + }, + "Rule":{ + "shape":"IndexingRuleValueUpdate", + "documentation":"Rule configuration to be updated.
" + } + } + }, + "UpdateIndexingRuleResult":{ + "type":"structure", + "members":{ + "IndexingRule":{ + "shape":"IndexingRule", + "documentation":"Updated indexing rule.
" + } + } + }, "UpdateSamplingRuleRequest":{ "type":"structure", "required":["SamplingRuleUpdate"], @@ -3160,6 +3670,28 @@ } } }, + "UpdateTraceSegmentDestinationRequest":{ + "type":"structure", + "members":{ + "Destination":{ + "shape":"TraceSegmentDestination", + "documentation":"The configured destination of trace segments.
" + } + } + }, + "UpdateTraceSegmentDestinationResult":{ + "type":"structure", + "members":{ + "Destination":{ + "shape":"TraceSegmentDestination", + "documentation":"The destination of the trace segments.
" + }, + "Status":{ + "shape":"TraceSegmentDestinationStatus", + "documentation":"The status of the update.
" + } + } + }, "ValueWithServiceIds":{ "type":"structure", "members":{ diff --git a/botocore/endpoint_provider.py b/botocore/endpoint_provider.py index 38b0a5ffe6..0fbd802b8d 100644 --- a/botocore/endpoint_provider.py +++ b/botocore/endpoint_provider.py @@ -42,7 +42,7 @@ logger = logging.getLogger(__name__) TEMPLATE_STRING_RE = re.compile(r"\{[a-zA-Z#]+\}") -GET_ATTR_RE = re.compile(r"(\w+)\[(\d+)\]") +GET_ATTR_RE = re.compile(r"(\w*)\[(\d+)\]") VALID_HOST_LABEL_RE = re.compile( r"^(?!-)[a-zA-Z\d-]{1,63}(?= len(value): return None return value[index] diff --git a/botocore/utils.py b/botocore/utils.py index 314d30516d..a54f4a39d3 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -1776,6 +1776,10 @@ def redirect_from_error(self, request_dict, response, operation, **kwargs): 0 ].status_code in (301, 302, 307) is_permanent_redirect = error_code == 'PermanentRedirect' + is_opt_in_region_redirect = ( + error_code == 'IllegalLocationConstraintException' + and operation.name != 'CreateBucket' + ) if not any( [ is_special_head_object, @@ -1783,6 +1787,7 @@ def redirect_from_error(self, request_dict, response, operation, **kwargs): is_permanent_redirect, is_special_head_bucket, is_redirect_status, + is_opt_in_region_redirect, ] ): return diff --git a/docs/source/conf.py b/docs/source/conf.py index 339de14631..f8fcced099 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.66' +release = '1.35.67' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/tests/functional/endpoint-rules/notifications/endpoint-tests-1.json b/tests/functional/endpoint-rules/notifications/endpoint-tests-1.json new file mode 100644 index 0000000000..33652ab16d --- /dev/null +++ b/tests/functional/endpoint-rules/notifications/endpoint-tests-1.json @@ -0,0 +1,201 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For 
region us-gov-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://notifications.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/notificationscontacts/endpoint-tests-1.json b/tests/functional/endpoint-rules/notificationscontacts/endpoint-tests-1.json new file mode 100644 index 0000000000..266e8a2ac6 --- /dev/null +++ b/tests/functional/endpoint-rules/notificationscontacts/endpoint-tests-1.json @@ -0,0 +1,313 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": 
{ + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://notifications-contacts-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://notifications-contacts.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://notifications-contacts-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://notifications-contacts.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://notifications-contacts-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://notifications-contacts.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://notifications-contacts-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://notifications-contacts.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://notifications-contacts-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and 
DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://notifications-contacts.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://notifications-contacts-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://notifications-contacts.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://notifications-contacts-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://notifications-contacts.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/unit/test_endpoint_provider.py b/tests/unit/test_endpoint_provider.py index c1f82ace2e..8bc8c429d0 100644 --- a/tests/unit/test_endpoint_provider.py +++ b/tests/unit/test_endpoint_provider.py @@ -510,3 +510,33 @@ def test_aws_is_virtual_hostable_s3_bucket_allow_subdomains( rule_lib.aws_is_virtual_hostable_s3_bucket(bucket, True) == expected_value ) + + +@pytest.mark.parametrize( + "value, path, expected_value", + [ + ({"foo": ['bar']}, 'baz[0]', None), # Missing index + ({"foo": ['bar']}, 'foo[1]', None), # Out of range index + ({"foo": ['bar']}, 'foo[0]', "bar"), # Named index + (("foo",), '[0]', "foo"), # Bare index + ({"foo": {}}, 'foo.bar[0]', None), # Missing index from split path + ( + {"foo": {'bar': []}}, + 'foo.bar[0]', + None, + ), # Out of range from split path + ( + {"foo": {"bar": "baz"}}, + 'foo.bar', + "baz", + ), # Split path with named index + ( + {"foo": {"bar": ["baz"]}}, + 'foo.bar[0]', + "baz", + ), # Split path with numeric index + ], +) +def test_get_attr(rule_lib, value, path, expected_value): + result = rule_lib.get_attr(value, path) + assert result == expected_value diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 8a92bd6b06..6143183418 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -1879,6 +1879,66 @@ def test_does_not_redirect_if_None_response(self): ) self.assertIsNone(redirect_response) + def test_redirects_on_illegal_location_constraint_from_opt_in_region(self): + request_dict = { + 'url': 
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 8a92bd6b06..6143183418 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -1879,6 +1879,66 @@ def test_does_not_redirect_if_None_response(self):
         )
         self.assertIsNone(redirect_response)
 
+    def test_redirects_on_illegal_location_constraint_from_opt_in_region(self):
+        request_dict = {
+            'url': 'https://il-central-1.amazonaws.com/foo',
+            'context': {
+                's3_redirect': {
+                    'bucket': 'foo',
+                    'redirected': False,
+                    'params': {'Bucket': 'foo'},
+                },
+                'signing': {},
+            },
+        }
+        response = (
+            None,
+            {
+                'Error': {'Code': 'IllegalLocationConstraintException'},
+                'ResponseMetadata': {
+                    'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'}
+                },
+            },
+        )
+
+        self.operation.name = 'GetObject'
+        redirect_response = self.redirector.redirect_from_error(
+            request_dict, response, self.operation
+        )
+        self.assertEqual(redirect_response, 0)
+
+    def test_no_redirect_on_illegal_location_constraint_from_bad_location_constraint(
+        self,
+    ):
+        request_dict = {
+            'url': 'https://us-west-2.amazonaws.com/foo',
+            'context': {
+                's3_redirect': {
+                    'bucket': 'foo',
+                    'redirected': False,
+                    'params': {
+                        'Bucket': 'foo',
+                        'CreateBucketConfiguration': {
+                            'LocationConstraint': 'eu-west-2',
+                        },
+                    },
+                },
+                'signing': {},
+            },
+        }
+        response = (
+            None,
+            {
+                'Error': {'Code': 'IllegalLocationConstraintException'},
+            },
+        )
+
+        self.operation.name = 'CreateBucket'
+        redirect_response = self.redirector.redirect_from_error(
+            request_dict, response, self.operation
+        )
+        self.assertIsNone(redirect_response)
+
     def test_get_region_from_response(self):
         response = (
             None,