diff --git a/src/AWSServices.jl b/src/AWSServices.jl index 6b7f6819c8..b45a459809 100644 --- a/src/AWSServices.jl +++ b/src/AWSServices.jl @@ -28,6 +28,7 @@ const apigatewayv2 = AWS.RestJSONService("apigateway", "apigateway", "2018-11-29 const app_mesh = AWS.RestJSONService("appmesh", "appmesh", "2019-01-25") const appconfig = AWS.RestJSONService("appconfig", "appconfig", "2019-10-09") const appconfigdata = AWS.RestJSONService("appconfig", "appconfigdata", "2021-11-11") +const appfabric = AWS.RestJSONService("appfabric", "appfabric", "2023-05-19") const appflow = AWS.RestJSONService("appflow", "appflow", "2020-08-23") const appintegrations = AWS.RestJSONService( "app-integrations", "app-integrations", "2020-07-29" @@ -279,6 +280,9 @@ const emr = AWS.JSONService( ) const emr_containers = AWS.RestJSONService("emr-containers", "emr-containers", "2020-10-01") const emr_serverless = AWS.RestJSONService("emr-serverless", "emr-serverless", "2021-07-13") +const entityresolution = AWS.RestJSONService( + "entityresolution", "entityresolution", "2018-05-10" +) const eventbridge = AWS.JSONService("events", "events", "2015-10-07", "1.1", "AWSEvents") const evidently = AWS.RestJSONService("evidently", "evidently", "2021-02-01") const finspace = AWS.RestJSONService("finspace", "finspace", "2021-03-12") @@ -449,6 +453,9 @@ const macie2 = AWS.RestJSONService("macie2", "macie2", "2020-01-01") const managedblockchain = AWS.RestJSONService( "managedblockchain", "managedblockchain", "2018-09-24" ) +const managedblockchain_query = AWS.RestJSONService( + "managedblockchain-query", "managedblockchain-query", "2023-05-04" +) const marketplace_catalog = AWS.RestJSONService( "aws-marketplace", "catalog.marketplace", "2018-09-17" ) @@ -482,6 +489,9 @@ const mediastore = AWS.JSONService( ) const mediastore_data = AWS.RestJSONService("mediastore", "data.mediastore", "2017-09-01") const mediatailor = AWS.RestJSONService("mediatailor", "api.mediatailor", "2018-04-23") +const medical_imaging = AWS.RestJSONService( + "medical-imaging", "medical-imaging", "2023-07-19" +) const memorydb = AWS.JSONService( "memorydb", "memory-db", "2021-01-01", "1.1", "AmazonMemoryDB" ) diff --git a/src/services/apigatewayv2.jl b/src/services/apigatewayv2.jl index 34c8801569..aacd7778c2 100644 --- a/src/services/apigatewayv2.jl +++ b/src/services/apigatewayv2.jl @@ -1149,7 +1149,7 @@ end delete_route_request_parameter(api_id, request_parameter_key, route_id) delete_route_request_parameter(api_id, request_parameter_key, route_id, params::Dict{String,<:Any}) -Deletes a route request parameter. +Deletes a route request parameter. Supported only for WebSocket APIs. # Arguments - `api_id`: The API identifier. diff --git a/src/services/appfabric.jl b/src/services/appfabric.jl new file mode 100644 index 0000000000..f0e606cff0 --- /dev/null +++ b/src/services/appfabric.jl @@ -0,0 +1,1212 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: appfabric +using AWS.Compat +using AWS.UUIDs + +""" + batch_get_user_access_tasks(app_bundle_identifier, task_id_list) + batch_get_user_access_tasks(app_bundle_identifier, task_id_list, params::Dict{String,<:Any}) + +Gets user access details in a batch request. This action polls data from the tasks that are +kicked off by the StartUserAccessTasks action. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `task_id_list`: The tasks IDs to use for the request. 
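A minimal usage sketch of the two generated call forms that follow, assuming the appfabric wrappers and a default AWS configuration are available; the app bundle identifier and task IDs are placeholders (in practice the task IDs come from an earlier StartUserAccessTasks call).

```julia
using AWS  # exports global_aws_config()

# Positional form: app bundle identifier plus the task IDs to look up.
# Both values below are placeholders.
batch_get_user_access_tasks(
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",      # app bundle (ARN or UUID)
    ["task-id-1", "task-id-2"],
)

# Same request with an explicit params Dict and configuration.
batch_get_user_access_tasks(
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",
    ["task-id-1", "task-id-2"],
    Dict{String,Any}();                          # no extra request parameters here
    aws_config=global_aws_config(),
)
```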
+ +""" +function batch_get_user_access_tasks( + appBundleIdentifier, taskIdList; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "POST", + "/useraccess/batchget", + Dict{String,Any}( + "appBundleIdentifier" => appBundleIdentifier, "taskIdList" => taskIdList + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_user_access_tasks( + appBundleIdentifier, + taskIdList, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/useraccess/batchget", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appBundleIdentifier" => appBundleIdentifier, "taskIdList" => taskIdList + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + connect_app_authorization(app_authorization_identifier, app_bundle_identifier) + connect_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Establishes a connection between Amazon Web Services AppFabric and an application, which +allows AppFabric to call the APIs of the application. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle that contains the app authorization to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"authRequest"`: Contains OAuth2 authorization information. This is required if the app + authorization for the request is configured with an OAuth2 (oauth2) authorization type. +""" +function connect_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)/connect"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function connect_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)/connect", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_app_authorization(app, app_bundle_identifier, auth_type, credential, tenant) + create_app_authorization(app, app_bundle_identifier, auth_type, credential, tenant, params::Dict{String,<:Any}) + +Creates an app authorization within an app bundle, which allows AppFabric to connect to an +application. + +# Arguments +- `app`: The name of the application. Valid values are: SLACK ASANA JIRA + M365 M365AUDITLOGS ZOOM ZENDESK OKTA GOOGLE DROPBOX SMARTSHEET + CISCO +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `auth_type`: The authorization type for the app authorization. +- `credential`: Contains credentials for the application, such as an API key or OAuth2 + client ID and secret. Specify credentials that match the authorization type for your + request. For example, if the authorization type for your request is OAuth2 (oauth2), then + you should provide only the OAuth2 credentials. 
+- `tenant`: Contains information about an application tenant, such as the application + display name and identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. +""" +function create_app_authorization( + app, + appBundleIdentifier, + authType, + credential, + tenant; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations", + Dict{String,Any}( + "app" => app, + "authType" => authType, + "credential" => credential, + "tenant" => tenant, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_app_authorization( + app, + appBundleIdentifier, + authType, + credential, + tenant, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/appauthorizations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "app" => app, + "authType" => authType, + "credential" => credential, + "tenant" => tenant, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_app_bundle() + create_app_bundle(params::Dict{String,<:Any}) + +Creates an app bundle to collect data from an application using AppFabric. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"customerManagedKeyIdentifier"`: The Amazon Resource Name (ARN) of the Key Management + Service (KMS) key to use to encrypt the application data. If this is not specified, an + Amazon Web Services owned key is used for encryption. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. 
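A short sketch of the create_app_bundle wrappers defined next, assuming they are in scope; the KMS key ARN shown is a placeholder.

```julia
using AWS

# Zero-argument form: the wrapper fills in a random clientToken (UUID) for idempotency.
create_app_bundle()

# Optional parameters such as customerManagedKeyIdentifier go in the params Dict.
create_app_bundle(
    Dict{String,Any}(
        "customerManagedKeyIdentifier" =>
            "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",  # placeholder ARN
    );
    aws_config=global_aws_config(),
)
```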
+""" +function create_app_bundle(; aws_config::AbstractAWSConfig=global_aws_config()) + return appfabric( + "POST", + "/appbundles", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_app_bundle( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "POST", + "/appbundles", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_ingestion(app, app_bundle_identifier, ingestion_type, tenant_id) + create_ingestion(app, app_bundle_identifier, ingestion_type, tenant_id, params::Dict{String,<:Any}) + +Creates a data ingestion for an application. + +# Arguments +- `app`: The name of the application. Valid values are: SLACK ASANA JIRA + M365 M365AUDITLOGS ZOOM ZENDESK OKTA GOOGLE DROPBOX SMARTSHEET + CISCO +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_type`: The ingestion type. +- `tenant_id`: The ID of the application tenant. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. +""" +function create_ingestion( + app, + appBundleIdentifier, + ingestionType, + tenantId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions", + Dict{String,Any}( + "app" => app, + "ingestionType" => ingestionType, + "tenantId" => tenantId, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ingestion( + app, + appBundleIdentifier, + ingestionType, + tenantId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "app" => app, + "ingestionType" => ingestionType, + "tenantId" => tenantId, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_identifier, processing_configuration) + create_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_identifier, processing_configuration, params::Dict{String,<:Any}) + +Creates an ingestion destination, which specifies how an application's ingested data is +processed by Amazon Web Services AppFabric and where it's delivered. 
+ +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `destination_configuration`: Contains information about the destination of ingested data. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. +- `processing_configuration`: Contains information about how ingested data is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: Specifies a unique, case-sensitive identifier that you provide to ensure + the idempotency of the request. This lets you safely retry the request without accidentally + performing the same operation a second time. Passing the same value to a later call to an + operation requires that you also pass the same value for all other parameters. We recommend + that you use a UUID type of value. If you don't provide this value, then Amazon Web + Services generates a random one for you. If you retry the operation with the same + ClientToken, but with different parameters, the retry fails with an + IdempotentParameterMismatch error. +- `"tags"`: A map of the key-value pairs of the tag or tags to assign to the resource. +""" +function create_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionIdentifier, + processingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations", + Dict{String,Any}( + "destinationConfiguration" => destinationConfiguration, + "processingConfiguration" => processingConfiguration, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionIdentifier, + processingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "destinationConfiguration" => destinationConfiguration, + "processingConfiguration" => processingConfiguration, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_app_authorization(app_authorization_identifier, app_bundle_identifier) + delete_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Deletes an app authorization. You must delete the associated ingestion before you can +delete an app authorization. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. 
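A hedged sketch of the delete_app_authorization wrappers defined next; both identifiers are placeholders, and either an ARN or a UUID is accepted.

```julia
# Any ingestion associated with the app authorization must be deleted first,
# as noted in the docstring above. Identifiers are placeholders.
delete_app_authorization(
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",   # app authorization
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222",   # app bundle
)
```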
+ +""" +function delete_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_app_bundle(app_bundle_identifier) + delete_app_bundle(app_bundle_identifier, params::Dict{String,<:Any}) + +Deletes an app bundle. You must delete all associated app authorizations before you can +delete an app bundle. + +# Arguments +- `app_bundle_identifier`: The ID or Amazon Resource Name (ARN) of the app bundle that + needs to be deleted. + +""" +function delete_app_bundle( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_app_bundle( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_ingestion(app_bundle_identifier, ingestion_identifier) + delete_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Deletes an ingestion. You must stop (disable) the ingestion and you must delete all +associated ingestion destinations before you can delete an app ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function delete_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier) + delete_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Deletes an ingestion destination. This deletes the association between an ingestion and +it's destination. It doesn't delete previously ingested data or the storage destination, +such as the Amazon S3 bucket where the data is delivered. If the ingestion destination is +deleted while the associated ingestion is enabled, the ingestion will fail and is +eventually disabled. 
+ +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_destination_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the ingestion destination to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function delete_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_app_authorization(app_authorization_identifier, app_bundle_identifier) + get_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Returns information about an app authorization. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +""" +function get_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_app_bundle(app_bundle_identifier) + get_app_bundle(app_bundle_identifier, params::Dict{String,<:Any}) + +Returns information about an app bundle. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. 
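A minimal sketch of the get_app_bundle wrappers defined next; the identifier is a placeholder.

```julia
# Look up a single app bundle by ARN or UUID (placeholder value shown).
bundle = get_app_bundle("a1b2c3d4-5678-90ab-cdef-EXAMPLE22222")

# Extra request parameters, if any, are forwarded through the params Dict form.
get_app_bundle("a1b2c3d4-5678-90ab-cdef-EXAMPLE22222", Dict{String,Any}())
```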
+ +""" +function get_app_bundle( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_app_bundle( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_ingestion(app_bundle_identifier, ingestion_identifier) + get_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Returns information about an ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function get_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier) + get_ingestion_destination(app_bundle_identifier, ingestion_destination_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Returns information about an ingestion destination. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_destination_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the ingestion destination to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function get_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ingestion_destination( + appBundleIdentifier, + ingestionDestinationIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_app_authorizations(app_bundle_identifier) + list_app_authorizations(app_bundle_identifier, params::Dict{String,<:Any}) + +Returns a list of all app authorizations configured for an app bundle. 
+ +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_app_authorizations( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_app_authorizations( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/appauthorizations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_app_bundles() + list_app_bundles(params::Dict{String,<:Any}) + +Returns a list of app bundles. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_app_bundles(; aws_config::AbstractAWSConfig=global_aws_config()) + return appfabric( + "GET", "/appbundles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_app_bundles( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", "/appbundles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_ingestion_destinations(app_bundle_identifier, ingestion_identifier) + list_ingestion_destinations(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Returns a list of all ingestion destinations configured for an ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. 
You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_ingestion_destinations( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_ingestion_destinations( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_ingestions(app_bundle_identifier) + list_ingestions(app_bundle_identifier, params::Dict{String,<:Any}) + +Returns a list of all ingestions configured for an app bundle. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_ingestions( + appBundleIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_ingestions( + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/appbundles/$(appBundleIdentifier)/ingestions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns a list of tags for a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which you want to + retrieve tags. 
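A minimal sketch of the list_tags_for_resource wrappers defined next; the resource ARN is a placeholder.

```julia
# The ARN of an AppFabric resource such as an app bundle (placeholder shown).
list_tags_for_resource("arn:aws:appfabric:us-east-1:111122223333:appbundle/EXAMPLE")
```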
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_ingestion(app_bundle_identifier, ingestion_identifier) + start_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Starts (enables) an ingestion, which collects data from an application. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. + +""" +function start_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/start"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/start", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_user_access_tasks(app_bundle_identifier, email) + start_user_access_tasks(app_bundle_identifier, email, params::Dict{String,<:Any}) + +Starts the tasks to search user access status for a specific email address. The tasks are +stopped when the user access status data is found. The tasks are terminated when the API +calls to the application time out. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `email`: The email address of the target user. + +""" +function start_user_access_tasks( + appBundleIdentifier, email; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "POST", + "/useraccess/start", + Dict{String,Any}("appBundleIdentifier" => appBundleIdentifier, "email" => email); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_user_access_tasks( + appBundleIdentifier, + email, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/useraccess/start", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appBundleIdentifier" => appBundleIdentifier, "email" => email + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_ingestion(app_bundle_identifier, ingestion_identifier) + stop_ingestion(app_bundle_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Stops (disables) an ingestion. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. 
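A hedged sketch of the stop_ingestion wrappers defined next; both identifiers are placeholders.

```julia
# Stop (disable) an ingestion. Per the docstrings above, an ingestion must be
# stopped and its ingestion destinations deleted before delete_ingestion can remove it.
stop_ingestion(
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222",   # app bundle
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333",   # ingestion
)
```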
+ +""" +function stop_ingestion( + appBundleIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_ingestion( + appBundleIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns one or more tags (key-value pairs) to the specified resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to tag. +- `tags`: A map of the key-value pairs of the tag or tags to assign to the resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return appfabric( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes a tag or tags from a resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource that you want to untag. +- `tag_keys`: The keys of the key-value pairs for the tag or tags you want to remove from + the specified resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return appfabric( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_app_authorization(app_authorization_identifier, app_bundle_identifier) + update_app_authorization(app_authorization_identifier, app_bundle_identifier, params::Dict{String,<:Any}) + +Updates an app authorization within an app bundle, which allows AppFabric to connect to an +application. If the app authorization was in a connected state, updating the app +authorization will set it back to a PendingConnect state. + +# Arguments +- `app_authorization_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the app authorization to use for the request. +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"credential"`: Contains credentials for the application, such as an API key or OAuth2 + client ID and secret. Specify credentials that match the authorization type of the app + authorization to update. For example, if the authorization type of the app authorization is + OAuth2 (oauth2), then you should provide only the OAuth2 credentials. +- `"tenant"`: Contains information about an application tenant, such as the application + display name and identifier. +""" +function update_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_app_authorization( + appAuthorizationIdentifier, + appBundleIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/appauthorizations/$(appAuthorizationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_destination_identifier, ingestion_identifier) + update_ingestion_destination(app_bundle_identifier, destination_configuration, ingestion_destination_identifier, ingestion_identifier, params::Dict{String,<:Any}) + +Updates an ingestion destination, which specifies how an application's ingested data is +processed by Amazon Web Services AppFabric and where it's delivered. + +# Arguments +- `app_bundle_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the app bundle to use for the request. +- `destination_configuration`: Contains information about the destination of ingested data. +- `ingestion_destination_identifier`: The Amazon Resource Name (ARN) or Universal Unique + Identifier (UUID) of the ingestion destination to use for the request. +- `ingestion_identifier`: The Amazon Resource Name (ARN) or Universal Unique Identifier + (UUID) of the ingestion to use for the request. 
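A sketch of the update_ingestion_destination wrappers defined next. The destination configuration must follow the service's DestinationConfiguration shape, which is not reproduced here, so the Dict below and the identifiers are placeholders only.

```julia
# Placeholder: replace with a Dict matching the service's DestinationConfiguration shape.
destination_config = Dict{String,Any}()

update_ingestion_destination(
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222",   # app bundle (placeholder)
    destination_config,
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE44444",   # ingestion destination (placeholder)
    "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333",   # ingestion (placeholder)
)
```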
+ +""" +function update_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionDestinationIdentifier, + ingestionIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + Dict{String,Any}("destinationConfiguration" => destinationConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_ingestion_destination( + appBundleIdentifier, + destinationConfiguration, + ingestionDestinationIdentifier, + ingestionIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appfabric( + "PATCH", + "/appbundles/$(appBundleIdentifier)/ingestions/$(ingestionIdentifier)/ingestiondestinations/$(ingestionDestinationIdentifier)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("destinationConfiguration" => destinationConfiguration), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/appflow.jl b/src/services/appflow.jl index aa84d5f2c9..1afb75c8c7 100644 --- a/src/services/appflow.jl +++ b/src/services/appflow.jl @@ -768,6 +768,71 @@ function register_connector( ) end +""" + reset_connector_metadata_cache() + reset_connector_metadata_cache(params::Dict{String,<:Any}) + +Resets metadata about your connector entities that Amazon AppFlow stored in its cache. Use +this action when you want Amazon AppFlow to return the latest information about the data +that you have in a source application. Amazon AppFlow returns metadata about your entities +when you use the ListConnectorEntities or DescribeConnectorEntities actions. Following +these actions, Amazon AppFlow caches the metadata to reduce the number of API requests that +it must send to the source application. Amazon AppFlow automatically resets the cache once +every hour, but you can use this action when you want to get the latest metadata right away. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"apiVersion"`: The API version that you specified in the connector profile that you’re + resetting cached metadata for. You must use this parameter only if the connector supports + multiple API versions or if the connector type is CustomConnector. To look up how many + versions a connector supports, use the DescribeConnectors action. In the response, find the + value that Amazon AppFlow returns for the connectorVersion parameter. To look up the + connector type, use the DescribeConnectorProfiles action. In the response, find the value + that Amazon AppFlow returns for the connectorType parameter. To look up the API version + that you specified in a connector profile, use the DescribeConnectorProfiles action. +- `"connectorEntityName"`: Use this parameter if you want to reset cached metadata about + the details for an individual entity. If you don't include this parameter in your request, + Amazon AppFlow only resets cached metadata about entity names, not entity details. +- `"connectorProfileName"`: The name of the connector profile that you want to reset cached + metadata for. You can omit this parameter if you're resetting the cache for any of the + following connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, + Amazon S3, or Upsolver. 
If you're resetting the cache for any other connector, you must + include this parameter in your request. +- `"connectorType"`: The type of connector to reset cached metadata for. You must include + this parameter in your request if you're resetting the cache for any of the following + connectors: Amazon Connect, Amazon EventBridge, Amazon Lookout for Metrics, Amazon S3, or + Upsolver. If you're resetting the cache for any other connector, you can omit this + parameter from your request. +- `"entitiesPath"`: Use this parameter only if you’re resetting the cached metadata about + a nested entity. Only some connectors support nested entities. A nested entity is one that + has another entity as a parent. To use this parameter, specify the name of the parent + entity. To look up the parent-child relationship of entities, you can send a + ListConnectorEntities request that omits the entitiesPath parameter. Amazon AppFlow will + return a list of top-level entities. For each one, it indicates whether the entity has + nested entities. Then, in a subsequent ListConnectorEntities request, you can specify a + parent entity name for the entitiesPath parameter. Amazon AppFlow will return a list of the + child entities for that parent. +""" +function reset_connector_metadata_cache(; aws_config::AbstractAWSConfig=global_aws_config()) + return appflow( + "POST", + "/reset-connector-metadata-cache"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reset_connector_metadata_cache( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appflow( + "POST", + "/reset-connector-metadata-cache", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_flow(flow_name) start_flow(flow_name, params::Dict{String,<:Any}) diff --git a/src/services/application_discovery_service.jl b/src/services/application_discovery_service.jl index a4ce003ec0..cabc439239 100644 --- a/src/services/application_discovery_service.jl +++ b/src/services/application_discovery_service.jl @@ -255,18 +255,18 @@ end describe_agents() describe_agents(params::Dict{String,<:Any}) -Lists agents or connectors as specified by ID or other filters. All agents/connectors -associated with your user account can be listed if you call DescribeAgents as is without -passing any parameters. +Lists agents or collectors as specified by ID or other filters. All agents/collectors +associated with your user can be listed if you call DescribeAgents as is without passing +any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"agentIds"`: The agent or the Connector IDs for which you want information. If you - specify no IDs, the system returns information about all agents/Connectors associated with - your Amazon Web Services user account. +- `"agentIds"`: The agent or the collector IDs for which you want information. If you + specify no IDs, the system returns information about all agents/collectors associated with + your user. - `"filters"`: You can filter the request using various logical operators and a key-value format. For example: {\"key\": \"collectionStatus\", \"value\": \"STARTED\"} -- `"maxResults"`: The total number of agents/Connectors to return in a single page of +- `"maxResults"`: The total number of agents/collectors to return in a single page of output. The maximum value is 100. - `"nextToken"`: Token to retrieve the next set of results. 
For example, if you previously specified 100 IDs for DescribeAgentsRequestagentIds but set DescribeAgentsRequestmaxResults @@ -333,8 +333,8 @@ end describe_continuous_exports() describe_continuous_exports(params::Dict{String,<:Any}) -Lists exports as specified by ID. All continuous exports associated with your user account -can be listed if you call DescribeContinuousExports as is without passing any parameters. +Lists exports as specified by ID. All continuous exports associated with your user can be +listed if you call DescribeContinuousExports as is without passing any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -466,8 +466,8 @@ end Retrieves a list of configuration items that have tags as specified by the key-value pairs, name and value, passed to the optional parameter filters. There are three valid tag filter names: tagKey tagValue configurationId Also, all configuration items associated -with your user account that have tags can be listed if you call DescribeTags as is without -passing any parameters. +with your user that have tags can be listed if you call DescribeTags as is without passing +any parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -722,16 +722,15 @@ end start_data_collection_by_agent_ids(agent_ids) start_data_collection_by_agent_ids(agent_ids, params::Dict{String,<:Any}) -Instructs the specified agents or connectors to start collecting data. +Instructs the specified agents to start collecting data. # Arguments -- `agent_ids`: The IDs of the agents or connectors from which to start collecting data. If - you send a request to an agent/connector ID that you do not have permission to contact, - according to your Amazon Web Services account, the service does not throw an exception. - Instead, it returns the error in the Description field. If you send a request to multiple - agents/connectors and you do not have permission to contact some of those - agents/connectors, the system does not throw an exception. Instead, the system shows Failed - in the Description field. +- `agent_ids`: The IDs of the agents from which to start collecting data. If you send a + request to an agent ID that you do not have permission to contact, according to your Amazon + Web Services account, the service does not throw an exception. Instead, it returns the + error in the Description field. If you send a request to multiple agents and you do not + have permission to contact some of those agents, the system does not throw an exception. + Instead, the system shows Failed in the Description field. """ function start_data_collection_by_agent_ids( @@ -763,14 +762,22 @@ end start_export_task() start_export_task(params::Dict{String,<:Any}) - Begins the export of discovered data to an S3 bucket. If you specify agentIds in a -filter, the task exports up to 72 hours of detailed data collected by the identified -Application Discovery Agent, including network, process, and performance details. A time -range for exported agent data may be set by using startTime and endTime. Export of detailed -agent data is limited to five concurrently running exports. If you do not include an -agentIds filter, summary data is exported that includes both Amazon Web Services Agentless -Discovery Connector data and summary data from Amazon Web Services Discovery Agents. Export -of summary data is limited to two exports per day. 
+Begins the export of a discovered data report to an Amazon S3 bucket managed by Amazon Web +Services. Exports might provide an estimate of fees and savings based on certain +information that you provide. Fee estimates do not include any taxes that might apply. Your +actual fees and savings depend on a variety of factors, including your actual usage of +Amazon Web Services services, which might vary from the estimates provided in this report. +If you do not specify preferences or agentIds in the filter, a summary of all servers, +applications, tags, and performance is generated. This data is an aggregation of all server +data collected through on-premises tooling, file import, application grouping and applying +tags. If you specify agentIds in a filter, the task exports up to 72 hours of detailed data +collected by the identified Application Discovery Agent, including network, process, and +performance details. A time range for exported agent data may be set by using startTime and +endTime. Export of detailed agent data is limited to five concurrently running exports. +Export of detailed agent data is limited to two exports per day. If you enable +ec2RecommendationsPreferences in preferences , an Amazon EC2 instance matching the +characteristics of each server in Application Discovery Service is generated. Changing the +attributes of the ec2RecommendationsPreferences changes the criteria of the recommendation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -782,8 +789,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"filters"`: If a filter is present, it selects the single agentId of the Application Discovery Agent for which data is exported. The agentId can be found in the results of the DescribeAgents API or CLI. If no filter is present, startTime and endTime are ignored and - exported data includes both Agentless Discovery Connector data and summary data from - Application Discovery agents. + exported data includes both Amazon Web Services Application Discovery Service Agentless + Collector collectors data and summary data from Application Discovery Agent agents. +- `"preferences"`: Indicates the type of data that needs to be exported. Only one + ExportPreferences can be enabled at any time. - `"startTime"`: The start timestamp for exported data from the single Application Discovery Agent selected in the filters. If no value is specified, data is exported starting from the first data collected by the agent. @@ -806,12 +815,14 @@ end start_import_task(import_url, name, params::Dict{String,<:Any}) Starts an import task, which allows you to import details of your on-premises environment -directly into Amazon Web Services Migration Hub without having to use the Application -Discovery Service (ADS) tools such as the Discovery Connector or Discovery Agent. This -gives you the option to perform migration assessment and planning directly from your -imported data, including the ability to group your devices as applications and track their -migration status. To start an import request, do this: Download the specially formatted -comma separated value (CSV) import template, which you can find here: +directly into Amazon Web Services Migration Hub without having to use the Amazon Web +Services Application Discovery Service (Application Discovery Service) tools such as the +Amazon Web Services Application Discovery Service Agentless Collector or Application +Discovery Agent. 
This gives you the option to perform migration assessment and planning +directly from your imported data, including the ability to group your devices as +applications and track their migration status. To start an import request, do this: +Download the specially formatted comma separated value (CSV) import template, which you can +find here: https://s3.us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_tem plate.csv. Fill out the template with your server and application data. Upload your import file to an Amazon S3 bucket, and make a note of it's Object URL. Your import file @@ -915,10 +926,10 @@ end stop_data_collection_by_agent_ids(agent_ids) stop_data_collection_by_agent_ids(agent_ids, params::Dict{String,<:Any}) -Instructs the specified agents or connectors to stop collecting data. +Instructs the specified agents to stop collecting data. # Arguments -- `agent_ids`: The IDs of the agents or connectors from which to stop collecting data. +- `agent_ids`: The IDs of the agents from which to stop collecting data. """ function stop_data_collection_by_agent_ids( diff --git a/src/services/application_insights.jl b/src/services/application_insights.jl index d7539f61f9..25aa621a71 100644 --- a/src/services/application_insights.jl +++ b/src/services/application_insights.jl @@ -4,6 +4,61 @@ using AWS.AWSServices: application_insights using AWS.Compat using AWS.UUIDs +""" + add_workload(component_name, resource_group_name, workload_configuration) + add_workload(component_name, resource_group_name, workload_configuration, params::Dict{String,<:Any}) + +Adds a workload to a component. Each component can have at most five workloads. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_configuration`: The configuration settings of the workload. The value is the + escaped JSON of the configuration. + +""" +function add_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "AddWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function add_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "AddWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_application() create_application(params::Dict{String,<:Any}) @@ -318,6 +373,9 @@ Describes the application. # Arguments - `resource_group_name`: The name of the resource group. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_application( ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -356,6 +414,9 @@ Describes a component and lists the resources that are grouped together in a com - `component_name`: The name of the component. - `resource_group_name`: The name of the resource group. 
+# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_component( ComponentName, ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -402,6 +463,9 @@ Describes the monitoring configuration of the component. - `component_name`: The name of the component. - `resource_group_name`: The name of the resource group. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_component_configuration( ComponentName, ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -449,6 +513,9 @@ Describes the recommended monitoring configuration of the component. - `resource_group_name`: The name of the resource group. - `tier`: The tier of the application component. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"RecommendationType"`: The recommended configuration type. """ function describe_component_configuration_recommendation( ComponentName, @@ -503,6 +570,9 @@ Describe a specific log pattern from a LogPatternSet. - `pattern_set_name`: The name of the log pattern set. - `resource_group_name`: The name of the resource group. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_log_pattern( PatternName, @@ -555,6 +625,9 @@ Describes an anomaly or error with the application. # Arguments - `observation_id`: The ID of the observation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_observation( ObservationId; aws_config::AbstractAWSConfig=global_aws_config() @@ -590,6 +663,10 @@ Describes an application problem. # Arguments - `problem_id`: The ID of the problem. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the owner of the resource group affected by the + problem. """ function describe_problem(ProblemId; aws_config::AbstractAWSConfig=global_aws_config()) return application_insights( @@ -623,6 +700,9 @@ Describes the anomalies or errors associated with the problem. # Arguments - `problem_id`: The ID of the problem. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. """ function describe_problem_observations( ProblemId; aws_config::AbstractAWSConfig=global_aws_config() @@ -649,6 +729,63 @@ function describe_problem_observations( ) end +""" + describe_workload(component_name, resource_group_name, workload_id) + describe_workload(component_name, resource_group_name, workload_id, params::Dict{String,<:Any}) + +Describes a workload and its configuration. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_id`: The ID of the workload. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the workload owner. 
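# Several Describe*/List* operations in this file gain an optional "AccountId", which
# lets a call target a resource group owned by another account. A minimal sketch using
# DescribeWorkload (documented above; the generated methods follow below) with
# placeholder identifiers:
using AWS
@service Application_Insights

Application_Insights.describe_workload(
    "my-component",
    "my-resource-group",
    "my-workload-id",
    Dict("AccountId" => "111122223333"),
)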
+""" +function describe_workload( + ComponentName, + ResourceGroupName, + WorkloadId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "DescribeWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_workload( + ComponentName, + ResourceGroupName, + WorkloadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "DescribeWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_applications() list_applications(params::Dict{String,<:Any}) @@ -657,6 +794,7 @@ Lists the IDs of the applications that you are monitoring. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. @@ -685,6 +823,7 @@ Lists the auto-grouped, standalone, and custom components of the application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. @@ -728,6 +867,7 @@ quotas. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"EndTime"`: The end time of the event. - `"EventStatus"`: The status of the configuration update event. Possible values include INFO, WARN, and ERROR. @@ -771,6 +911,7 @@ Lists the log pattern sets in the specific application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. @@ -813,6 +954,7 @@ Lists the log patterns in the specific log LogPatternSet. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value. - `"NextToken"`: The token to request the next page of results. @@ -853,6 +995,7 @@ Lists the problems with your application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID for the resource group owner. - `"ComponentName"`: The name of the component. 
- `"EndTime"`: The time when the problem ended, in epoch seconds. If not specified, problems within the past seven days are returned. @@ -862,6 +1005,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceGroupName"`: The name of the resource group. - `"StartTime"`: The time when the problem was detected, in epoch seconds. If you don't specify a time frame for the request, problems within the past seven days are returned. +- `"Visibility"`: Specifies whether or not you can view the problem. If not specified, + visible and ignored problems are returned. """ function list_problems(; aws_config::AbstractAWSConfig=global_aws_config()) return application_insights( @@ -916,6 +1061,112 @@ function list_tags_for_resource( ) end +""" + list_workloads(component_name, resource_group_name) + list_workloads(component_name, resource_group_name, params::Dict{String,<:Any}) + +Lists the workloads that are configured on a given component. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountId"`: The AWS account ID of the owner of the workload. +- `"MaxResults"`: The maximum number of results to return in a single call. To retrieve the + remaining results, make another call with the returned NextToken value. +- `"NextToken"`: The token to request the next page of results. +""" +function list_workloads( + ComponentName, ResourceGroupName; aws_config::AbstractAWSConfig=global_aws_config() +) + return application_insights( + "ListWorkloads", + Dict{String,Any}( + "ComponentName" => ComponentName, "ResourceGroupName" => ResourceGroupName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_workloads( + ComponentName, + ResourceGroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "ListWorkloads", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + remove_workload(component_name, resource_group_name, workload_id) + remove_workload(component_name, resource_group_name, workload_id, params::Dict{String,<:Any}) + +Remove workload from a component. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_id`: The ID of the workload. 
+ +""" +function remove_workload( + ComponentName, + ResourceGroupName, + WorkloadId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "RemoveWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function remove_workload( + ComponentName, + ResourceGroupName, + WorkloadId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "RemoveWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadId" => WorkloadId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -1231,3 +1482,101 @@ function update_log_pattern( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_problem(problem_id) + update_problem(problem_id, params::Dict{String,<:Any}) + +Updates the visibility of the problem or specifies the problem as RESOLVED. + +# Arguments +- `problem_id`: The ID of the problem. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"UpdateStatus"`: The status of the problem. Arguments can be passed for only problems + that show a status of RECOVERING. +- `"Visibility"`: The visibility of a problem. When you pass a value of IGNORED, the + problem is removed from the default view, and all notifications for the problem are + suspended. When VISIBLE is passed, the IGNORED action is reversed. +""" +function update_problem(ProblemId; aws_config::AbstractAWSConfig=global_aws_config()) + return application_insights( + "UpdateProblem", + Dict{String,Any}("ProblemId" => ProblemId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_problem( + ProblemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "UpdateProblem", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ProblemId" => ProblemId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_workload(component_name, resource_group_name, workload_configuration) + update_workload(component_name, resource_group_name, workload_configuration, params::Dict{String,<:Any}) + +Adds a workload to a component. Each component can have at most five workloads. + +# Arguments +- `component_name`: The name of the component. +- `resource_group_name`: The name of the resource group. +- `workload_configuration`: The configuration settings of the workload. The value is the + escaped JSON of the configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WorkloadId"`: The ID of the workload. 
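# Sketch of the new UpdateProblem operation defined above: hide a problem from the
# default view and later restore it. The problem ID is a placeholder; "IGNORED" and
# "VISIBLE" are the visibility values described in the docstring.
using AWS
@service Application_Insights

Application_Insights.update_problem("my-problem-id", Dict("Visibility" => "IGNORED"))
# Reversing the IGNORED action later:
Application_Insights.update_problem("my-problem-id", Dict("Visibility" => "VISIBLE"))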
+""" +function update_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "UpdateWorkload", + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_workload( + ComponentName, + ResourceGroupName, + WorkloadConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return application_insights( + "UpdateWorkload", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComponentName" => ComponentName, + "ResourceGroupName" => ResourceGroupName, + "WorkloadConfiguration" => WorkloadConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/appstream.jl b/src/services/appstream.jl index 1f15795c3f..eba7e68aad 100644 --- a/src/services/appstream.jl +++ b/src/services/appstream.jl @@ -4,6 +4,52 @@ using AWS.AWSServices: appstream using AWS.Compat using AWS.UUIDs +""" + associate_app_block_builder_app_block(app_block_arn, app_block_builder_name) + associate_app_block_builder_app_block(app_block_arn, app_block_builder_name, params::Dict{String,<:Any}) + +Associates the specified app block builder with the specified app block. + +# Arguments +- `app_block_arn`: The ARN of the app block. +- `app_block_builder_name`: The name of the app block builder. + +""" +function associate_app_block_builder_app_block( + AppBlockArn, AppBlockBuilderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "AssociateAppBlockBuilderAppBlock", + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, "AppBlockBuilderName" => AppBlockBuilderName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_app_block_builder_app_block( + AppBlockArn, + AppBlockBuilderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "AssociateAppBlockBuilderAppBlock", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, + "AppBlockBuilderName" => AppBlockBuilderName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_application_fleet(application_arn, fleet_name) associate_application_fleet(application_arn, fleet_name, params::Dict{String,<:Any}) @@ -284,8 +330,8 @@ function copy_image( end """ - create_app_block(name, setup_script_details, source_s3_location) - create_app_block(name, setup_script_details, source_s3_location, params::Dict{String,<:Any}) + create_app_block(name, source_s3_location) + create_app_block(name, source_s3_location, params::Dict{String,<:Any}) Creates an app block. App blocks are an Amazon AppStream 2.0 resource that stores the details about the virtual hard disk in an S3 bucket. It also stores the setup script with @@ -296,48 +342,124 @@ fleets. # Arguments - `name`: The name of the app block. -- `setup_script_details`: The setup script details of the app block. - `source_s3_location`: The source S3 location of the app block. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: The description of the app block. - `"DisplayName"`: The display name of the app block. This is not displayed to the user. 
+- `"PackagingType"`: The packaging type of the app block. +- `"PostSetupScriptDetails"`: The post setup script details of the app block. This can only + be provided for the APPSTREAM2 PackagingType. +- `"SetupScriptDetails"`: The setup script details of the app block. This must be provided + for the CUSTOM PackagingType. - `"Tags"`: The tags assigned to the app block. """ +function create_app_block( + Name, SourceS3Location; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "CreateAppBlock", + Dict{String,Any}("Name" => Name, "SourceS3Location" => SourceS3Location); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end function create_app_block( Name, - SetupScriptDetails, - SourceS3Location; + SourceS3Location, + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return appstream( "CreateAppBlock", Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Name" => Name, "SourceS3Location" => SourceS3Location), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_app_block_builder(instance_type, name, platform, vpc_config) + create_app_block_builder(instance_type, name, platform, vpc_config, params::Dict{String,<:Any}) + +Creates an app block builder. + +# Arguments +- `instance_type`: The instance type to use when launching the app block builder. The + following instance types are available: stream.standard.small stream.standard.medium + stream.standard.large stream.standard.xlarge stream.standard.2xlarge +- `name`: The unique name for the app block builder. +- `platform`: The platform of the app block builder. WINDOWS_SERVER_2019 is the only valid + value. +- `vpc_config`: The VPC configuration for the app block builder. App block builders require + that you specify at least two subnets in different availability zones. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessEndpoints"`: The list of interface VPC endpoint (interface endpoint) objects. + Administrators can connect to the app block builder only through the specified endpoints. +- `"Description"`: The description of the app block builder. +- `"DisplayName"`: The display name of the app block builder. +- `"EnableDefaultInternetAccess"`: Enables or disables default internet access for the app + block builder. +- `"IamRoleArn"`: The Amazon Resource Name (ARN) of the IAM role to apply to the app block + builder. To assume a role, the app block builder calls the AWS Security Token Service (STS) + AssumeRole API operation and passes the ARN of the role to use. The operation creates a new + session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and + creates the appstream_machine_role credential profile on the instance. For more + information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running + on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide. +- `"Tags"`: The tags to associate with the app block builder. A tag is a key-value pair, + and the value is optional. For example, Environment=Test. If you do not specify a value, + Environment=. If you do not specify a value, the value is set to an empty string. + Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and + the following special characters: _ . : / = + - @ For more information, see Tagging Your + Resources in the Amazon AppStream 2.0 Administration Guide. 
+""" +function create_app_block_builder( + InstanceType, + Name, + Platform, + VpcConfig; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "CreateAppBlockBuilder", + Dict{String,Any}( + "InstanceType" => InstanceType, "Name" => Name, - "SetupScriptDetails" => SetupScriptDetails, - "SourceS3Location" => SourceS3Location, + "Platform" => Platform, + "VpcConfig" => VpcConfig, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function create_app_block( +function create_app_block_builder( + InstanceType, Name, - SetupScriptDetails, - SourceS3Location, + Platform, + VpcConfig, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return appstream( - "CreateAppBlock", + "CreateAppBlockBuilder", Dict{String,Any}( mergewith( _merge, Dict{String,Any}( + "InstanceType" => InstanceType, "Name" => Name, - "SetupScriptDetails" => SetupScriptDetails, - "SourceS3Location" => SourceS3Location, + "Platform" => Platform, + "VpcConfig" => VpcConfig, ), params, ), @@ -347,6 +469,49 @@ function create_app_block( ) end +""" + create_app_block_builder_streaming_url(app_block_builder_name) + create_app_block_builder_streaming_url(app_block_builder_name, params::Dict{String,<:Any}) + +Creates a URL to start a create app block builder streaming session. + +# Arguments +- `app_block_builder_name`: The name of the app block builder. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Validity"`: The time that the streaming URL will be valid, in seconds. Specify a value + between 1 and 604800 seconds. The default is 3600 seconds. +""" +function create_app_block_builder_streaming_url( + AppBlockBuilderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "CreateAppBlockBuilderStreamingURL", + Dict{String,Any}("AppBlockBuilderName" => AppBlockBuilderName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_app_block_builder_streaming_url( + AppBlockBuilderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "CreateAppBlockBuilderStreamingURL", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AppBlockBuilderName" => AppBlockBuilderName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_application(app_block_arn, icon_s3_location, instance_families, launch_path, name, platforms) create_application(app_block_arn, icon_s3_location, instance_families, launch_path, name, platforms, params::Dict{String,<:Any}) @@ -1103,6 +1268,36 @@ function delete_app_block( ) end +""" + delete_app_block_builder(name) + delete_app_block_builder(name, params::Dict{String,<:Any}) + +Deletes an app block builder. An app block builder can only be deleted when it has no +association with an app block. + +# Arguments +- `name`: The name of the app block builder. 
+ +""" +function delete_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "DeleteAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DeleteAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_application(name) delete_application(name, params::Dict{String,<:Any}) @@ -1442,6 +1637,69 @@ function delete_user( ) end +""" + describe_app_block_builder_app_block_associations() + describe_app_block_builder_app_block_associations(params::Dict{String,<:Any}) + +Retrieves a list that describes one or more app block builder associations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AppBlockArn"`: The ARN of the app block. +- `"AppBlockBuilderName"`: The name of the app block builder. +- `"MaxResults"`: The maximum size of each page of results. +- `"NextToken"`: The pagination token used to retrieve the next page of results for this + operation. +""" +function describe_app_block_builder_app_block_associations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DescribeAppBlockBuilderAppBlockAssociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_app_block_builder_app_block_associations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DescribeAppBlockBuilderAppBlockAssociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_app_block_builders() + describe_app_block_builders(params::Dict{String,<:Any}) + +Retrieves a list that describes one or more app block builders. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum size of each page of results. The maximum value is 25. +- `"Names"`: The names of the app block builders. +- `"NextToken"`: The pagination token used to retrieve the next page of results for this + operation. +""" +function describe_app_block_builders(; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "DescribeAppBlockBuilders"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_app_block_builders( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DescribeAppBlockBuilders", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_app_blocks() describe_app_blocks(params::Dict{String,<:Any}) @@ -1969,6 +2227,52 @@ function disable_user( ) end +""" + disassociate_app_block_builder_app_block(app_block_arn, app_block_builder_name) + disassociate_app_block_builder_app_block(app_block_arn, app_block_builder_name, params::Dict{String,<:Any}) + +Disassociates a specified app block builder from a specified app block. + +# Arguments +- `app_block_arn`: The ARN of the app block. +- `app_block_builder_name`: The name of the app block builder. 
+ +""" +function disassociate_app_block_builder_app_block( + AppBlockArn, AppBlockBuilderName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DisassociateAppBlockBuilderAppBlock", + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, "AppBlockBuilderName" => AppBlockBuilderName + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_app_block_builder_app_block( + AppBlockArn, + AppBlockBuilderName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "DisassociateAppBlockBuilderAppBlock", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AppBlockArn" => AppBlockArn, + "AppBlockBuilderName" => AppBlockBuilderName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_application_fleet(application_arn, fleet_name) disassociate_application_fleet(application_arn, fleet_name, params::Dict{String,<:Any}) @@ -2353,6 +2657,37 @@ function list_tags_for_resource( ) end +""" + start_app_block_builder(name) + start_app_block_builder(name, params::Dict{String,<:Any}) + +Starts an app block builder. An app block builder can only be started when it's associated +with an app block. Starting an app block builder starts a new instance, which is equivalent +to an elastic fleet instance with application builder assistance functionality. + +# Arguments +- `name`: The name of the app block builder. + +""" +function start_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "StartAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "StartAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_fleet(name) start_fleet(name, params::Dict{String,<:Any}) @@ -2415,6 +2750,36 @@ function start_image_builder( ) end +""" + stop_app_block_builder(name) + stop_app_block_builder(name, params::Dict{String,<:Any}) + +Stops an app block builder. Stopping an app block builder terminates the instance, and the +instance state is not persisted. + +# Arguments +- `name`: The name of the app block builder. + +""" +function stop_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "StopAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "StopAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_fleet(name) stop_fleet(name, params::Dict{String,<:Any}) @@ -2564,6 +2929,61 @@ function untag_resource( ) end +""" + update_app_block_builder(name) + update_app_block_builder(name, params::Dict{String,<:Any}) + +Updates an app block builder. If the app block builder is in the STARTING or STOPPING +state, you can't update it. If the app block builder is in the RUNNING state, you can only +update the DisplayName and Description. 
If the app block builder is in the STOPPED state, +you can update any attribute except the Name. + +# Arguments +- `name`: The unique name for the app block builder. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessEndpoints"`: The list of interface VPC endpoint (interface endpoint) objects. + Administrators can connect to the app block builder only through the specified endpoints. +- `"AttributesToDelete"`: The attributes to delete from the app block builder. +- `"Description"`: The description of the app block builder. +- `"DisplayName"`: The display name of the app block builder. +- `"EnableDefaultInternetAccess"`: Enables or disables default internet access for the app + block builder. +- `"IamRoleArn"`: The Amazon Resource Name (ARN) of the IAM role to apply to the app block + builder. To assume a role, the app block builder calls the AWS Security Token Service (STS) + AssumeRole API operation and passes the ARN of the role to use. The operation creates a new + session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and + creates the appstream_machine_role credential profile on the instance. For more + information, see Using an IAM Role to Grant Permissions to Applications and Scripts Running + on AppStream 2.0 Streaming Instances in the Amazon AppStream 2.0 Administration Guide. +- `"InstanceType"`: The instance type to use when launching the app block builder. The + following instance types are available: stream.standard.small stream.standard.medium + stream.standard.large stream.standard.xlarge stream.standard.2xlarge +- `"Platform"`: The platform of the app block builder. WINDOWS_SERVER_2019 is the only + valid value. +- `"VpcConfig"`: The VPC configuration for the app block builder. App block builders + require that you specify at least two subnets in different availability zones. +""" +function update_app_block_builder(Name; aws_config::AbstractAWSConfig=global_aws_config()) + return appstream( + "UpdateAppBlockBuilder", + Dict{String,Any}("Name" => Name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_app_block_builder( + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "UpdateAppBlockBuilder", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_application(name) update_application(name, params::Dict{String,<:Any}) @@ -2778,7 +3198,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a - new instance. Specify a value between 600 and 360000. + new instance. Specify a value between 600 and 432000. - `"Name"`: A unique name for the fleet. - `"Platform"`: The platform of the fleet. WINDOWS_SERVER_2019 and AMAZON_LINUX2 are supported for Elastic fleets. 
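# End-to-end sketch of the app block builder lifecycle added in this file: associate
# a builder with an app block, start it, inspect it, and stop it again. Per the
# docstrings above, a builder can only be started while it is associated with an app
# block and can only be deleted once it has no association. The ARN and builder name
# are placeholders.
using AWS
@service AppStream

app_block_arn = "arn:aws:appstream:us-east-1:111122223333:app-block/example-app-block"

AppStream.associate_app_block_builder_app_block(app_block_arn, "example-builder")
AppStream.start_app_block_builder("example-builder")

# Check the builder's state via the Names filter on DescribeAppBlockBuilders.
AppStream.describe_app_block_builders(Dict("Names" => ["example-builder"]))

AppStream.stop_app_block_builder("example-builder")
AppStream.disassociate_app_block_builder_app_block(app_block_arn, "example-builder")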
diff --git a/src/services/auditmanager.jl b/src/services/auditmanager.jl index 6b33ae45a8..c24660a730 100644 --- a/src/services/auditmanager.jl +++ b/src/services/auditmanager.jl @@ -238,15 +238,15 @@ end batch_import_evidence_to_assessment_control(assessment_id, control_id, control_set_id, manual_evidence) batch_import_evidence_to_assessment_control(assessment_id, control_id, control_set_id, manual_evidence, params::Dict{String,<:Any}) -Uploads one or more pieces of evidence to a control in an Audit Manager assessment. You can -upload manual evidence from any Amazon Simple Storage Service (Amazon S3) bucket by -specifying the S3 URI of the evidence. You must upload manual evidence to your S3 bucket -before you can upload it to your assessment. For instructions, see CreateBucket and -PutObject in the Amazon Simple Storage Service API Reference. The following restrictions -apply to this action: Maximum size of an individual evidence file: 100 MB Number of -daily manual evidence uploads per control: 100 Supported file formats: See Supported file -types for manual evidence in the Audit Manager User Guide For more information about -Audit Manager service restrictions, see Quotas and restrictions for Audit Manager. +Adds one or more pieces of evidence to a control in an Audit Manager assessment. You can +import manual evidence from any S3 bucket by specifying the S3 URI of the object. You can +also upload a file from your browser, or enter plain text in response to a risk assessment +question. The following restrictions apply to this action: manualEvidence can be only +one of the following: evidenceFileName, s3ResourcePath, or textResponse Maximum size of +an individual evidence file: 100 MB Number of daily manual evidence uploads per control: +100 Supported file formats: See Supported file types for manual evidence in the Audit +Manager User Guide For more information about Audit Manager service restrictions, see +Quotas and restrictions for Audit Manager. # Arguments - `assessment_id`: The identifier for the assessment. @@ -671,7 +671,11 @@ end delete_control(control_id) delete_control(control_id, params::Dict{String,<:Any}) - Deletes a custom control in Audit Manager. + Deletes a custom control in Audit Manager. When you invoke this operation, the custom +control is deleted from any frameworks or assessments that it’s currently part of. As a +result, Audit Manager will stop collecting evidence for that custom control in all of your +assessments. This includes assessments that you previously created before you deleted the +custom control. # Arguments - `control_id`: The unique identifier for the control. @@ -838,7 +842,7 @@ end get_account_status() get_account_status(params::Dict{String,<:Any}) - Returns the registration status of an account in Audit Manager. + Gets the registration status of an account in Audit Manager. """ function get_account_status(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -862,7 +866,7 @@ end get_assessment(assessment_id) get_assessment(assessment_id, params::Dict{String,<:Any}) -Returns an assessment from Audit Manager. +Gets information about a specified assessment. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -894,7 +898,7 @@ end get_assessment_framework(framework_id) get_assessment_framework(framework_id, params::Dict{String,<:Any}) -Returns a framework from Audit Manager. +Gets information about a specified framework. # Arguments - `framework_id`: The identifier for the framework. 
@@ -928,7 +932,7 @@ end get_assessment_report_url(assessment_id, assessment_report_id) get_assessment_report_url(assessment_id, assessment_report_id, params::Dict{String,<:Any}) - Returns the URL of an assessment report in Audit Manager. + Gets the URL of an assessment report in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -964,7 +968,7 @@ end get_change_logs(assessment_id) get_change_logs(assessment_id, params::Dict{String,<:Any}) - Returns a list of changelogs from Audit Manager. + Gets a list of changelogs from Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1003,7 +1007,7 @@ end get_control(control_id) get_control(control_id, params::Dict{String,<:Any}) - Returns a control from Audit Manager. + Gets information about a specified control. # Arguments - `control_id`: The identifier for the control. @@ -1035,7 +1039,7 @@ end get_delegations() get_delegations(params::Dict{String,<:Any}) - Returns a list of delegations from an audit owner to a delegate. + Gets a list of delegations from an audit owner to a delegate. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1064,7 +1068,7 @@ end get_evidence(assessment_id, control_set_id, evidence_folder_id, evidence_id) get_evidence(assessment_id, control_set_id, evidence_folder_id, evidence_id, params::Dict{String,<:Any}) - Returns evidence from Audit Manager. + Gets information about a specified evidence item. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1109,7 +1113,7 @@ end get_evidence_by_evidence_folder(assessment_id, control_set_id, evidence_folder_id) get_evidence_by_evidence_folder(assessment_id, control_set_id, evidence_folder_id, params::Dict{String,<:Any}) - Returns all evidence from a specified evidence folder in Audit Manager. + Gets all evidence from a specified evidence folder in Audit Manager. # Arguments - `assessment_id`: The identifier for the assessment. @@ -1152,11 +1156,55 @@ function get_evidence_by_evidence_folder( ) end +""" + get_evidence_file_upload_url(file_name) + get_evidence_file_upload_url(file_name, params::Dict{String,<:Any}) + +Creates a presigned Amazon S3 URL that can be used to upload a file as manual evidence. For +instructions on how to use this operation, see Upload a file from your browser in the +Audit Manager User Guide. The following restrictions apply to this operation: Maximum +size of an individual evidence file: 100 MB Number of daily manual evidence uploads per +control: 100 Supported file formats: See Supported file types for manual evidence in the +Audit Manager User Guide For more information about Audit Manager service restrictions, +see Quotas and restrictions for Audit Manager. + +# Arguments +- `file_name`: The file that you want to upload. For a list of supported file formats, see + Supported file types for manual evidence in the Audit Manager User Guide. 
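# Sketch of the new manual-evidence flow documented above: request a presigned S3 URL
# with GetEvidenceFileUploadUrl (generated methods follow below), upload the file to
# it, then register it on an assessment control with
# BatchImportEvidenceToAssessmentControl. The response field names ("uploadUrl",
# "evidenceFileName") follow the GetEvidenceFileUploadUrl API reference, HTTP.jl is
# assumed for the upload, and all IDs are placeholders.
using AWS
using HTTP
@service AuditManager

upload = AuditManager.get_evidence_file_upload_url("evidence.pdf")
HTTP.put(upload["uploadUrl"], [], read("evidence.pdf"))

AuditManager.batch_import_evidence_to_assessment_control(
    "my-assessment-id",
    "my-control-id",
    "my-control-set-id",
    [Dict("evidenceFileName" => upload["evidenceFileName"])],
)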
+ +""" +function get_evidence_file_upload_url( + fileName; aws_config::AbstractAWSConfig=global_aws_config() +) + return auditmanager( + "GET", + "/evidenceFileUploadUrl", + Dict{String,Any}("fileName" => fileName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_evidence_file_upload_url( + fileName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return auditmanager( + "GET", + "/evidenceFileUploadUrl", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("fileName" => fileName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_evidence_folder(assessment_id, control_set_id, evidence_folder_id) get_evidence_folder(assessment_id, control_set_id, evidence_folder_id, params::Dict{String,<:Any}) - Returns an evidence folder from the specified assessment in Audit Manager. + Gets an evidence folder from a specified assessment in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1198,7 +1246,7 @@ end get_evidence_folders_by_assessment(assessment_id) get_evidence_folders_by_assessment(assessment_id, params::Dict{String,<:Any}) - Returns the evidence folders from a specified assessment in Audit Manager. + Gets the evidence folders from a specified assessment in Audit Manager. # Arguments - `assessment_id`: The unique identifier for the assessment. @@ -1237,8 +1285,8 @@ end get_evidence_folders_by_assessment_control(assessment_id, control_id, control_set_id) get_evidence_folders_by_assessment_control(assessment_id, control_id, control_set_id, params::Dict{String,<:Any}) - Returns a list of evidence folders that are associated with a specified control in an -Audit Manager assessment. + Gets a list of evidence folders that are associated with a specified control in an Audit +Manager assessment. # Arguments - `assessment_id`: The identifier for the assessment. @@ -1335,7 +1383,7 @@ end get_organization_admin_account() get_organization_admin_account(params::Dict{String,<:Any}) - Returns the name of the delegated Amazon Web Services administrator account for the + Gets the name of the delegated Amazon Web Services administrator account for a specified organization. """ @@ -1363,7 +1411,7 @@ end get_services_in_scope() get_services_in_scope(params::Dict{String,<:Any}) -Returns a list of all of the Amazon Web Services that you can choose to include in your +Gets a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope. @@ -1385,7 +1433,7 @@ end get_settings(attribute) get_settings(attribute, params::Dict{String,<:Any}) - Returns the settings for the specified Amazon Web Services account. + Gets the settings for a specified Amazon Web Services account. # Arguments - `attribute`: The list of setting attribute enum values. @@ -2435,8 +2483,10 @@ end # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"defaultAssessmentReportsDestination"`: The default storage destination for assessment - reports. +- `"defaultAssessmentReportsDestination"`: The default S3 destination bucket for storing + assessment reports. +- `"defaultExportDestination"`: The default S3 destination bucket for storing evidence + finder exports. - `"defaultProcessOwners"`: A list of the default audit owners. 
- `"deregistrationPolicy"`: The deregistration policy for your Audit Manager data. You can use this attribute to determine how your data is handled when you deregister Audit Manager. diff --git a/src/services/auto_scaling.jl b/src/services/auto_scaling.jl index 50b07239ff..f5a5e19ca3 100644 --- a/src/services/auto_scaling.jl +++ b/src/services/auto_scaling.jl @@ -392,8 +392,7 @@ notifications to the target. Create the lifecycle hook. Specify whether the ho when the instances launch or terminate. If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state. If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call. For more -information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling -User Guide. +information, see Complete a lifecycle action in the Amazon EC2 Auto Scaling User Guide. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -2144,9 +2143,9 @@ end detach_traffic_sources(auto_scaling_group_name, traffic_sources, params::Dict{String,<:Any}) Detaches one or more traffic sources from the specified Auto Scaling group. When you detach -a taffic, it enters the Removing state while deregistering the instances in the group. When -all instances are deregistered, then you can no longer describe the traffic source using -the DescribeTrafficSources API call. The instances continue to run. +a traffic source, it enters the Removing state while deregistering the instances in the +group. When all instances are deregistered, then you can no longer describe the traffic +source using the DescribeTrafficSources API call. The instances continue to run. # Arguments - `auto_scaling_group_name`: The name of the Auto Scaling group. @@ -2745,8 +2744,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the policy type is PredictiveScaling. - `"ScalingAdjustment"`: The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from - the current capacity. For exact capacity, you must specify a positive value. Required if - the policy type is SimpleScaling. (Not used with any other policy type.) + the current capacity. For exact capacity, you must specify a non-negative value. Required + if the policy type is SimpleScaling. (Not used with any other policy type.) - `"StepAdjustments"`: A set of adjustments that enable you to scale based on the size of the alarm breach. Required if the policy type is StepScaling. (Not used with any other policy type.) @@ -3060,8 +3059,8 @@ function resume_processes( end """ - rollback_instance_refresh() - rollback_instance_refresh(params::Dict{String,<:Any}) + rollback_instance_refresh(auto_scaling_group_name) + rollback_instance_refresh(auto_scaling_group_name, params::Dict{String,<:Any}) Cancels an instance refresh that is in progress and rolls back any changes that it made. Amazon EC2 Auto Scaling replaces any instances that were replaced during the instance @@ -3076,21 +3075,34 @@ launch template's Latest or Default version. When you receive a successful res this operation, Amazon EC2 Auto Scaling immediately begins replacing instances. You can check the status of this operation through the DescribeInstanceRefreshes API operation. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AutoScalingGroupName"`: The name of the Auto Scaling group. 
+# Arguments +- `auto_scaling_group_name`: The name of the Auto Scaling group. + """ -function rollback_instance_refresh(; aws_config::AbstractAWSConfig=global_aws_config()) +function rollback_instance_refresh( + AutoScalingGroupName; aws_config::AbstractAWSConfig=global_aws_config() +) return auto_scaling( - "RollbackInstanceRefresh"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "RollbackInstanceRefresh", + Dict{String,Any}("AutoScalingGroupName" => AutoScalingGroupName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function rollback_instance_refresh( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + AutoScalingGroupName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return auto_scaling( "RollbackInstanceRefresh", - params; + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("AutoScalingGroupName" => AutoScalingGroupName), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) diff --git a/src/services/batch.jl b/src/services/batch.jl index ea6bb61249..2b2024a8eb 100644 --- a/src/services/batch.jl +++ b/src/services/batch.jl @@ -10,9 +10,13 @@ using AWS.UUIDs Cancels a job in an Batch job queue. Jobs that are in the SUBMITTED or PENDING are canceled. A job inRUNNABLE remains in RUNNABLE until it reaches the head of the job queue. -Then the job status is updated to FAILED. Jobs that progressed to the STARTING or RUNNING -state aren't canceled. However, the API operation still succeeds, even if no job is -canceled. These jobs must be terminated with the TerminateJob operation. +Then the job status is updated to FAILED. A PENDING job is canceled after all dependency +jobs are completed. Therefore, it may take longer than expected to cancel a job in PENDING +status. When you try to cancel an array parent job in PENDING, Batch attempts to cancel all +child jobs. The array parent job is canceled when all child jobs are completed. Jobs that +progressed to the STARTING or RUNNING state aren't canceled. However, the API operation +still succeeds, even if no job is canceled. These jobs must be terminated with the +TerminateJob operation. # Arguments - `job_id`: The Batch job ID of the job to cancel. @@ -88,11 +92,13 @@ updating of compute environments to update AMIs, follow these rules: Either do service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE or SPOT_CAPACITY_OPTIMIZED. Set the update to latest image version -(updateToLatestImageVersion) parameter to true. Don't specify an AMI ID in imageId, -imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In -that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at -the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID -in the imageId or imageIdOverride parameters, or the launch template identified by the +(updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is +used when you update a compute environment. This parameter is ignored when you create a +compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in +ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects +the latest Amazon ECS optimized AMI that's supported by Batch at the time the +infrastructure update is initiated. 
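# Sketch of the updated RollbackInstanceRefresh call shown above: the Auto Scaling
# group name is now a required positional argument instead of an optional parameter.
# The group name is a placeholder.
using AWS
@service Auto_Scaling

Auto_Scaling.rollback_instance_refresh("my-auto-scaling-group")
# Progress of the rollback can then be followed through the DescribeInstanceRefreshes
# operation referenced in the docstring.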
Alternatively, you can specify the AMI ID in the +imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be @@ -996,9 +1002,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition. The minimum supported value is 0 and the maximum supported value is 9999. -- `"shareIdentifier"`: The share identifier for the job. If the job queue doesn't have a - scheduling policy, then this parameter must not be specified. If the job queue has a - scheduling policy, then this parameter must be specified. +- `"shareIdentifier"`: The share identifier for the job. Don't specify this parameter if + the job queue doesn't have a scheduling policy. If the job queue has a scheduling policy, + then this parameter must be specified. This string is limited to 255 alphanumeric + characters, and can be followed by an asterisk (*). - `"tags"`: The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference. diff --git a/src/services/billingconductor.jl b/src/services/billingconductor.jl index 64b8311620..e0be9566ec 100644 --- a/src/services/billingconductor.jl +++ b/src/services/billingconductor.jl @@ -194,7 +194,7 @@ Services charges, based off of the predefined pricing plan computation. # Arguments - `account_grouping`: The set of accounts that will be under the billing group. The set of - accounts resemble the linked accounts in a consolidated family. + accounts resemble the linked accounts in a consolidated billing family. - `computation_preference`: The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group. - `name`: The billing group name. The names must be unique. @@ -1178,6 +1178,8 @@ This updates an existing billing group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccountGrouping"`: Specifies if the billing group has automatic account association + (AutoAssociate) enabled. - `"ComputationPreference"`: The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group. - `"Description"`: A description of the billing group. diff --git a/src/services/chime.jl b/src/services/chime.jl index cd7d74bd2a..099728e69e 100644 --- a/src/services/chime.jl +++ b/src/services/chime.jl @@ -51,7 +51,11 @@ end associate_phone_numbers_with_voice_connector(e164_phone_numbers, voice_connector_id) associate_phone_numbers_with_voice_connector(e164_phone_numbers, voice_connector_id, params::Dict{String,<:Any}) -Associates phone numbers with the specified Amazon Chime Voice Connector. +Associates phone numbers with the specified Amazon Chime Voice Connector. This API is is +no longer supported and will not be updated. We recommend using the latest version, +AssociatePhoneNumbersWithVoiceConnector, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. 
For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. @@ -97,7 +101,11 @@ end associate_phone_numbers_with_voice_connector_group(e164_phone_numbers, voice_connector_group_id) associate_phone_numbers_with_voice_connector_group(e164_phone_numbers, voice_connector_group_id, params::Dict{String,<:Any}) -Associates phone numbers with the specified Amazon Chime Voice Connector group. +Associates phone numbers with the specified Amazon Chime Voice Connector group. This API +is is no longer supported and will not be updated. We recommend using the latest version, +AssociatePhoneNumbersWithVoiceConnectorGroup, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. @@ -188,9 +196,12 @@ end batch_create_attendee(attendees, meeting_id) batch_create_attendee(attendees, meeting_id, params::Dict{String,<:Any}) - Creates up to 100 new attendees for an active Amazon Chime SDK meeting. For more -information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime -SDK Developer Guide. +Creates up to 100 new attendees for an active Amazon Chime SDK meeting. This API is is no +longer supported and will not be updated. We recommend using the latest version, +BatchCreateAttendee, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. For more information about the Amazon +Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide. # Arguments - `attendees`: The request containing the attendees to create. @@ -229,7 +240,11 @@ end batch_create_channel_membership(member_arns, channel_arn) batch_create_channel_membership(member_arns, channel_arn, params::Dict{String,<:Any}) -Adds a specified number of users to a channel. +Adds a specified number of users to a channel. This API is is no longer supported and +will not be updated. We recommend using the latest version, BatchCreateChannelMembership, +in the Amazon Chime SDK. Using the latest version requires migrating to a dedicated +namespace. For more information, refer to Migrating from the Amazon Chime namespace in the +Amazon Chime SDK Developer Guide. # Arguments - `member_arns`: The ARNs of the members you want to add to the channel. @@ -409,9 +424,9 @@ end Removes the suspension from up to 50 previously suspended users for the specified Amazon Chime EnterpriseLWA account. Only users on EnterpriseLWA accounts can be unsuspended using this action. For more information about different account types, see Managing Your Amazon -Chime Accounts in the account types, in the Amazon Chime Administration Guide. -Previously suspended users who are unsuspended using this action are returned to Registered -status. Users who are not previously suspended are ignored. +Chime Accounts in the account types, in the Amazon Chime Administration Guide. Previously +suspended users who are unsuspended using this action are returned to Registered status. +Users who are not previously suspended are ignored. # Arguments - `user_id_list`: The request containing the user IDs to unsuspend. 
@@ -580,7 +595,10 @@ end Creates an Amazon Chime SDK messaging AppInstance under an AWS account. Only SDK messaging customers use this API. CreateAppInstance supports idempotency behavior as described in the -AWS API Standard. +AWS API Standard. This API is is no longer supported and will not be updated. We +recommend using the latest version, CreateAppInstance, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The ClientRequestToken of the AppInstance. @@ -630,9 +648,12 @@ end create_app_instance_admin(app_instance_admin_arn, app_instance_arn, params::Dict{String,<:Any}) Promotes an AppInstanceUser to an AppInstanceAdmin. The promoted user can perform the -following actions. ChannelModerator actions across all channels in the AppInstance. -DeleteChannelMessage actions. Only an AppInstanceUser can be promoted to an -AppInstanceAdmin role. +following actions. This API is is no longer supported and will not be updated. We +recommend using the latest version, CreateAppInstanceAdmin, in the Amazon Chime SDK. Using +the latest version requires migrating to a dedicated namespace. For more information, refer +to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. +ChannelModerator actions across all channels in the AppInstance. DeleteChannelMessage +actions. Only an AppInstanceUser can be promoted to an AppInstanceAdmin role. # Arguments - `app_instance_admin_arn`: The ARN of the administrator of the current AppInstance. @@ -676,7 +697,11 @@ end create_app_instance_user(app_instance_arn, app_instance_user_id, client_request_token, name, params::Dict{String,<:Any}) Creates a user under an Amazon Chime AppInstance. The request consists of a unique -appInstanceUserId and Name for that user. +appInstanceUserId and Name for that user. This API is is no longer supported and will not +be updated. We recommend using the latest version, CreateAppInstanceUser, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance request. @@ -743,7 +768,10 @@ end Creates a new attendee for an active Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer -Guide. +Guide. This API is is no longer supported and will not be updated. We recommend using +the latest version, CreateAttendee, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `external_user_id`: The Amazon Chime SDK external user ID. An idempotency token. Links @@ -830,7 +858,11 @@ end Creates a channel to which you can add users and send messages. Restriction: You can't change a channel's privacy. The x-amz-chime-bearer request header is mandatory. Use the -AppInstanceUserArn of the user that makes the API call as the value in the header. +AppInstanceUserArn of the user that makes the API call as the value in the header. This +API is is no longer supported and will not be updated. We recommend using the latest +version, CreateChannel, in the Amazon Chime SDK. 
Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the channel request. @@ -902,7 +934,10 @@ To undo a ban, you first have to DeleteChannelBan, and then CreateChannelMembers are cleaned up when you delete users or channels. If you ban a user who is already part of a channel, that user is automatically kicked from the channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call -as the value in the header. +as the value in the header. This API is is no longer supported and will not be updated. +We recommend using the latest version, CreateChannelBan, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `member_arn`: The ARN of the member being banned. @@ -950,7 +985,11 @@ messages Leave the channel Privacy settings impact this action as follows: Channels: You do not need to be a member to list messages, but you must be a member to send messages. Private Channels: You must be a member to list or send messages. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, CreateChannelMembership, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `member_arn`: The ARN of the member you want to add to the channel. @@ -1003,7 +1042,11 @@ Creates a new ChannelModerator. A channel moderator can: Add and remove other the channel. Add and remove other moderators of the channel. Add and remove user bans for the channel. Redact messages in the channel. List messages in the channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, CreateChannelModerator, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `channel_moderator_arn`: The ARN of the moderator. @@ -1049,7 +1092,11 @@ end create_media_capture_pipeline(sink_arn, sink_type, source_arn, source_type) create_media_capture_pipeline(sink_arn, sink_type, source_arn, source_type, params::Dict{String,<:Any}) -Creates a media capture pipeline. +Creates a media capture pipeline. This API is is no longer supported and will not be +updated. We recommend using the latest version, CreateMediaCapturePipeline, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `sink_arn`: The ARN of the sink type. 
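
The notice repeated throughout these chime.jl hunks points callers at the dedicated Amazon Chime SDK namespaces. A rough sketch of what that migration can look like from AWS.jl, assuming a corresponding chime_sdk_meetings service is also generated in this package and that `@service` resolves the module name as shown; the meeting and user identifiers are placeholders and the calls are left commented out:

using AWS
@service Chime                    # legacy namespace documented in this file
@service Chime_SDK_Meetings       # dedicated namespace the notices recommend (assumed present)

meeting_id = "00000000-0000-0000-0000-000000000000"   # placeholder meeting ID
# Deprecated call in the legacy namespace (still generated, no longer updated upstream):
# Chime.create_attendee("external-user-1", meeting_id)
# Equivalent CreateAttendee in the dedicated meetings namespace:
# Chime_SDK_Meetings.create_attendee("external-user-1", meeting_id)
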
@@ -1120,10 +1167,14 @@ end create_meeting(client_request_token) create_meeting(client_request_token, params::Dict{String,<:Any}) - Creates a new Amazon Chime SDK meeting in the specified media Region with no initial +Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime SDK Developer Guide . For more information about the Amazon -Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide . +Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide. This +API is no longer supported and will not be updated. We recommend using the latest +version, CreateMeeting, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The unique identifier for the client request. Use a different @@ -1180,6 +1231,7 @@ forth) to initiate an outbound call to a public switched telephone network (PSTN them into a Chime meeting. Also ensures that the From number belongs to the customer. To play welcome audio or implement an interactive voice response (IVR), use the CreateSipMediaApplicationCall action with the corresponding SIP media application ID. +This API is not available in a dedicated namespace. # Arguments - `from_phone_number`: Phone number used as the caller ID when the remote party receives a @@ -1243,7 +1295,11 @@ end Creates a new Amazon Chime SDK meeting in the specified media Region, with attendees. For more information about specifying media Regions, see Amazon Chime SDK Media Regions in the Amazon Chime SDK Developer Guide . For more information about the Amazon Chime SDK, see -Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide . +Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide. This API is no +longer supported and will not be updated. We recommend using the latest version, +CreateMeetingWithAttendees, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The unique identifier for the client request. Use a different @@ -1347,7 +1403,10 @@ end create_proxy_session(capabilities, participant_phone_numbers, voice_connector_id, params::Dict{String,<:Any}) Creates a proxy session on the specified Amazon Chime Voice Connector for the specified -participant phone numbers. +participant phone numbers. This API is no longer supported and will not be updated. We +recommend using the latest version, CreateProxySession, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `capabilities`: The proxy session capabilities. @@ -1500,7 +1559,11 @@ end create_sip_media_application(aws_region, endpoints, name) create_sip_media_application(aws_region, endpoints, name, params::Dict{String,<:Any}) -Creates a SIP media application. +Creates a SIP media application. This API is no longer supported and will not be +updated. We recommend using the latest version, CreateSipMediaApplication, in the Amazon +Chime SDK.
Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `aws_region`: The AWS Region assigned to the SIP media application. @@ -1551,7 +1614,11 @@ end create_sip_media_application_call(from_phone_number, to_phone_number, sip_media_application_id, params::Dict{String,<:Any}) Creates an outbound call to a phone number from the phone number specified in the request, -and it invokes the endpoint of the specified sipMediaApplicationId. +and it invokes the endpoint of the specified sipMediaApplicationId. This API is is no +longer supported and will not be updated. We recommend using the latest version, +CreateSipMediaApplicationCall, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `from_phone_number`: The phone number that a user calls from. This is a phone number in @@ -1608,7 +1675,10 @@ end create_sip_rule(name, target_applications, trigger_type, trigger_value, params::Dict{String,<:Any}) Creates a SIP rule which can be used to run a SIP media application as a target for a -specific trigger type. +specific trigger type. This API is is no longer supported and will not be updated. We +recommend using the latest version, CreateSipRule, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the SIP rule. @@ -1718,10 +1788,14 @@ end create_voice_connector(name, require_encryption, params::Dict{String,<:Any}) Creates an Amazon Chime Voice Connector under the administrator's AWS account. You can -choose to create an Amazon Chime Voice Connector in a specific AWS Region. Enabling +choose to create an Amazon Chime Voice Connector in a specific AWS Region. Enabling CreateVoiceConnectorRequestRequireEncryption configures your Amazon Chime Voice Connector to use TLS transport for SIP signaling and Secure RTP (SRTP) for media. Inbound calls use -TLS transport, and unencrypted outbound calls are blocked. +TLS transport, and unencrypted outbound calls are blocked. This API is is no longer +supported and will not be updated. We recommend using the latest version, +CreateVoiceConnector, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector. @@ -1773,7 +1847,11 @@ Creates an Amazon Chime Voice Connector group under the administrator's AWS acco can associate Amazon Chime Voice Connectors with the Amazon Chime Voice Connector group by including VoiceConnectorItems in the request. You can include Amazon Chime Voice Connectors from different AWS Regions in your group. This creates a fault tolerant mechanism for -fallback in case of availability events. +fallback in case of availability events. This API is is no longer supported and will not +be updated. We recommend using the latest version, CreateVoiceConnectorGroup, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. 
For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector group. @@ -1847,7 +1925,11 @@ end delete_app_instance(app_instance_arn) delete_app_instance(app_instance_arn, params::Dict{String,<:Any}) -Deletes an AppInstance and all associated data asynchronously. +Deletes an AppInstance and all associated data asynchronously. This API is is no longer +supported and will not be updated. We recommend using the latest version, +DeleteAppInstance, in the Amazon Chime SDK. Using the latest version requires migrating to +a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -1882,6 +1964,10 @@ end delete_app_instance_admin(app_instance_admin_arn, app_instance_arn, params::Dict{String,<:Any}) Demotes an AppInstanceAdmin to an AppInstanceUser. This action does not delete the user. +This API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteAppInstanceAdmin, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_admin_arn`: The ARN of the AppInstance's administrator. @@ -1917,7 +2003,11 @@ end delete_app_instance_streaming_configurations(app_instance_arn) delete_app_instance_streaming_configurations(app_instance_arn, params::Dict{String,<:Any}) -Deletes the streaming configurations of an AppInstance. +Deletes the streaming configurations of an AppInstance. This API is is no longer +supported and will not be updated. We recommend using the latest version, +DeleteAppInstanceStreamingConfigurations, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the streaming configurations being deleted. @@ -1951,7 +2041,10 @@ end delete_app_instance_user(app_instance_user_arn) delete_app_instance_user(app_instance_user_arn, params::Dict{String,<:Any}) -Deletes an AppInstanceUser. +Deletes an AppInstanceUser. This API is is no longer supported and will not be updated. +We recommend using the latest version, DeleteAppInstanceUser, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_user_arn`: The ARN of the user request being deleted. @@ -1988,7 +2081,10 @@ end Deletes an attendee from the specified Amazon Chime SDK meeting and deletes their JoinToken. Attendees are automatically deleted when a Amazon Chime SDK meeting is deleted. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the -Amazon Chime SDK Developer Guide. +Amazon Chime SDK Developer Guide. This API is is no longer supported and will not be +updated. We recommend using the latest version, DeleteAttendee, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. 
# Arguments - `attendee_id`: The Amazon Chime SDK attendee ID. @@ -2027,6 +2123,10 @@ end Immediately makes a channel and its memberships inaccessible and marks them for deletion. This is an irreversible process. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. +This API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannel, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel being deleted. @@ -2063,7 +2163,10 @@ end Removes a user from a channel's ban list. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, DeleteChannelBan, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel from which the AppInstanceUser was banned. @@ -2104,6 +2207,10 @@ end Removes a member from a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. +This API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannelMembership, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel from which you want to remove the user. @@ -2145,7 +2252,11 @@ end Deletes a channel message. Only admins can perform this action. Deletion makes messages inaccessible immediately. A background process deletes any revisions created by UpdateChannelMessage. The x-amz-chime-bearer request header is mandatory. Use the -AppInstanceUserArn of the user that makes the API call as the value in the header. +AppInstanceUserArn of the user that makes the API call as the value in the header. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannelMessage, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -2185,7 +2296,11 @@ end delete_channel_moderator(channel_arn, channel_moderator_arn, params::Dict{String,<:Any}) Deletes a channel moderator. The x-amz-chime-bearer request header is mandatory. Use the -AppInstanceUserArn of the user that makes the API call as the value in the header. +AppInstanceUserArn of the user that makes the API call as the value in the header. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteChannelModerator, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. 
For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -2260,7 +2375,11 @@ end delete_media_capture_pipeline(media_pipeline_id) delete_media_capture_pipeline(media_pipeline_id, params::Dict{String,<:Any}) -Deletes the media capture pipeline. +Deletes the media capture pipeline. This API is is no longer supported and will not be +updated. We recommend using the latest version, DeleteMediaCapturePipeline, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `media_pipeline_id`: The ID of the media capture pipeline being deleted. @@ -2297,7 +2416,10 @@ end Deletes the specified Amazon Chime SDK meeting. The operation deletes all attendees, disconnects all clients, and prevents new clients from joining the meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime -SDK Developer Guide. +SDK Developer Guide. This API is is no longer supported and will not be updated. We +recommend using the latest version, DeleteMeeting, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. @@ -2366,7 +2488,11 @@ end delete_proxy_session(proxy_session_id, voice_connector_id) delete_proxy_session(proxy_session_id, voice_connector_id, params::Dict{String,<:Any}) -Deletes the specified proxy session from the specified Amazon Chime Voice Connector. +Deletes the specified proxy session from the specified Amazon Chime Voice Connector. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteProxySession, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `proxy_session_id`: The proxy session ID. @@ -2474,7 +2600,11 @@ end delete_sip_media_application(sip_media_application_id) delete_sip_media_application(sip_media_application_id, params::Dict{String,<:Any}) -Deletes a SIP media application. +Deletes a SIP media application. This API is is no longer supported and will not be +updated. We recommend using the latest version, DeleteSipMediaApplication, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -2508,7 +2638,11 @@ end delete_sip_rule(sip_rule_id) delete_sip_rule(sip_rule_id, params::Dict{String,<:Any}) -Deletes a SIP rule. You must disable a SIP rule before you can delete it. +Deletes a SIP rule. You must disable a SIP rule before you can delete it. This API is is +no longer supported and will not be updated. We recommend using the latest version, +DeleteSipRule, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_rule_id`: The SIP rule ID. 
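
The delete_sip_rule hunk above keeps the constraint that a SIP rule must be disabled before it can be deleted. A minimal sketch of that two-step teardown, assuming the legacy module is loaded with `@service` and that the Disabled flag is accepted as an optional field of update_sip_rule; the rule name and ID are placeholders and the calls are left commented out:

using AWS
@service Chime

sip_rule_id = "hypothetical-sip-rule-id"   # placeholder
# Disable the rule first; "Disabled" is an optional request field passed through params:
# Chime.update_sip_rule("my-sip-rule", sip_rule_id, Dict{String,Any}("Disabled" => true))
# Only a disabled rule can then be deleted:
# Chime.delete_sip_rule(sip_rule_id)
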
@@ -2541,7 +2675,11 @@ end delete_voice_connector(voice_connector_id, params::Dict{String,<:Any}) Deletes the specified Amazon Chime Voice Connector. Any phone numbers associated with the -Amazon Chime Voice Connector must be disassociated from it before it can be deleted. +Amazon Chime Voice Connector must be disassociated from it before it can be deleted. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteVoiceConnector, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2576,7 +2714,11 @@ end delete_voice_connector_emergency_calling_configuration(voice_connector_id, params::Dict{String,<:Any}) Deletes the emergency calling configuration details from the specified Amazon Chime Voice -Connector. +Connector. This API is is no longer supported and will not be updated. We recommend using +the latest version, DeleteVoiceConnectorEmergencyCallingConfiguration, in the Amazon Chime +SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2611,7 +2753,11 @@ end delete_voice_connector_group(voice_connector_group_id, params::Dict{String,<:Any}) Deletes the specified Amazon Chime Voice Connector group. Any VoiceConnectorItems and phone -numbers associated with the group must be removed before it can be deleted. +numbers associated with the group must be removed before it can be deleted. This API is +is no longer supported and will not be updated. We recommend using the latest version, +DeleteVoiceConnectorGroup, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_group_id`: The Amazon Chime Voice Connector group ID. @@ -2647,7 +2793,11 @@ end Deletes the origination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted -prior to deleting the origination settings. +prior to deleting the origination settings. This API is is no longer supported and will +not be updated. We recommend using the latest version, DeleteVoiceConnectorOrigination, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2681,7 +2831,11 @@ end delete_voice_connector_proxy(voice_connector_id) delete_voice_connector_proxy(voice_connector_id, params::Dict{String,<:Any}) -Deletes the proxy configuration from the specified Amazon Chime Voice Connector. +Deletes the proxy configuration from the specified Amazon Chime Voice Connector. This API +is is no longer supported and will not be updated. We recommend using the latest version, +DeleteVoiceProxy, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. 
For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2715,7 +2869,11 @@ end delete_voice_connector_streaming_configuration(voice_connector_id) delete_voice_connector_streaming_configuration(voice_connector_id, params::Dict{String,<:Any}) -Deletes the streaming configuration for the specified Amazon Chime Voice Connector. +Deletes the streaming configuration for the specified Amazon Chime Voice Connector. This +API is is no longer supported and will not be updated. We recommend using the latest +version, DeleteVoiceConnectorStreamingConfiguration, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2751,7 +2909,11 @@ end Deletes the termination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted -prior to deleting the termination settings. +prior to deleting the termination settings. This API is is no longer supported and will +not be updated. We recommend using the latest version, DeleteVoiceConnectorTermination, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -2786,7 +2948,11 @@ end delete_voice_connector_termination_credentials(usernames, voice_connector_id, params::Dict{String,<:Any}) Deletes the specified SIP credentials used by your equipment to authenticate during call -termination. +termination. This API is is no longer supported and will not be updated. We recommend +using the latest version, DeleteVoiceConnectorTerminationCredentials, in the Amazon Chime +SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `usernames`: The RFC2617 compliant username associated with the SIP credentials, in @@ -2826,7 +2992,11 @@ end describe_app_instance(app_instance_arn) describe_app_instance(app_instance_arn, params::Dict{String,<:Any}) -Returns the full details of an AppInstance. +Returns the full details of an AppInstance. This API is is no longer supported and will +not be updated. We recommend using the latest version, DescribeAppInstance, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -2860,7 +3030,11 @@ end describe_app_instance_admin(app_instance_admin_arn, app_instance_arn) describe_app_instance_admin(app_instance_admin_arn, app_instance_arn, params::Dict{String,<:Any}) -Returns the full details of an AppInstanceAdmin. +Returns the full details of an AppInstanceAdmin. This API is is no longer supported and +will not be updated. We recommend using the latest version, DescribeAppInstanceAdmin, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. 
+For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `app_instance_admin_arn`: The ARN of the AppInstanceAdmin. @@ -2896,7 +3070,11 @@ end describe_app_instance_user(app_instance_user_arn) describe_app_instance_user(app_instance_user_arn, params::Dict{String,<:Any}) -Returns the full details of an AppInstanceUser. +Returns the full details of an AppInstanceUser. This API is is no longer supported and +will not be updated. We recommend using the latest version, DescribeAppInstanceUser, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `app_instance_user_arn`: The ARN of the AppInstanceUser. @@ -2932,7 +3110,11 @@ end Returns the full details of a channel in an Amazon Chime AppInstance. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, DescribeChannel, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -2969,7 +3151,10 @@ end Returns the full details of a channel ban. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, DescribeChannelBan, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel from which the user is banned. @@ -3010,7 +3195,10 @@ end Returns the full details of a user's channel membership. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, DescribeChannelMembership, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -3051,7 +3239,11 @@ end Returns the details of a channel based on the membership of the specified AppInstanceUser. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user -that makes the API call as the value in the header. +that makes the API call as the value in the header. This API is is no longer supported +and will not be updated. We recommend using the latest version, +DescribeChannelMembershipForAppInstanceUser, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-user-arn`: The ARN of the user in a channel. 
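
Several of the messaging hunks above repeat that the x-amz-chime-bearer header is mandatory and must carry the caller's AppInstanceUserArn. One way to supply it is sketched below, under the assumption that AWS.jl forwards a "headers" entry in `params` as extra HTTP headers (an assumption about the request plumbing, not something stated in this diff); the ARNs are placeholders and the call is left commented out:

using AWS
@service Chime

channel_arn = "arn:aws:chime:us-east-1:111122223333:app-instance/abcd/channel/efgh"  # placeholder
caller_arn  = "arn:aws:chime:us-east-1:111122223333:app-instance/abcd/user/ijkl"     # placeholder
# Assumed plumbing: extra HTTP headers supplied under a "headers" key in params.
bearer = Dict{String,Any}("headers" => Dict{String,Any}("x-amz-chime-bearer" => caller_arn))
# Chime.describe_channel(channel_arn, bearer)
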
@@ -3099,7 +3291,11 @@ end Returns the full details of a channel moderated by the specified AppInstanceUser. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, +DescribeChannelModeratedByAppInstanceUser, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-user-arn`: The ARN of the AppInstanceUser in the moderated channel. @@ -3147,7 +3343,10 @@ end Returns the full details of a single ChannelModerator. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, DescribeChannelModerator, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -3223,6 +3422,10 @@ end disassociate_phone_numbers_from_voice_connector(e164_phone_numbers, voice_connector_id, params::Dict{String,<:Any}) Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector. + This API is is no longer supported and will not be updated. We recommend using the latest +version, DisassociatePhoneNumbersFromVoiceConnector, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. @@ -3264,7 +3467,10 @@ end disassociate_phone_numbers_from_voice_connector_group(e164_phone_numbers, voice_connector_group_id, params::Dict{String,<:Any}) Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector -group. +group. This API is is no longer supported and will not be updated. We recommend using the +latest version, DisassociatePhoneNumbersFromVoiceConnectorGroup, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `e164_phone_numbers`: List of phone numbers, in E.164 format. @@ -3413,7 +3619,11 @@ end get_app_instance_retention_settings(app_instance_arn) get_app_instance_retention_settings(app_instance_arn, params::Dict{String,<:Any}) -Gets the retention settings for an AppInstance. +Gets the retention settings for an AppInstance. This API is is no longer supported and +will not be updated. We recommend using the latest version, GetMessagingRetentionSettings, +in the Amazon Chime SDK. Using the latest version requires migrating to a dedicated +namespace. For more information, refer to Migrating from the Amazon Chime namespace in the +Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. 
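
Both disassociate hunks above take a list of E.164 numbers plus a Voice Connector (or group) identifier. A small sketch of the call shape, assuming the legacy module is loaded with `@service`; the numbers and connector ID are placeholders and the call is left commented out:

using AWS
@service Chime

numbers = ["+12065550100", "+12065550101"]      # E.164 format, per the docstring above
voice_connector_id = "abcdef1ghij2klmno3pqr4"   # placeholder connector ID
# Chime.disassociate_phone_numbers_from_voice_connector(numbers, voice_connector_id)
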
@@ -3447,7 +3657,11 @@ end get_app_instance_streaming_configurations(app_instance_arn) get_app_instance_streaming_configurations(app_instance_arn, params::Dict{String,<:Any}) -Gets the streaming settings for an AppInstance. +Gets the streaming settings for an AppInstance. This API is is no longer supported and +will not be updated. We recommend using the latest version, +GetMessagingStreamingConfigurations, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -3483,7 +3697,10 @@ end Gets the Amazon Chime SDK attendee details for a specified meeting ID and attendee ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon -Chime SDK Developer Guide . +Chime SDK Developer Guide. This API is is no longer supported and will not be updated. +We recommend using the latest version, GetAttendee, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `attendee_id`: The Amazon Chime SDK attendee ID. @@ -3556,7 +3773,10 @@ end Gets the full details of a channel message. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, GetChannelMessage, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -3651,7 +3871,11 @@ end get_media_capture_pipeline(media_pipeline_id) get_media_capture_pipeline(media_pipeline_id, params::Dict{String,<:Any}) -Gets an existing media capture pipeline. +Gets an existing media capture pipeline. This API is is no longer supported and will not +be updated. We recommend using the latest version, GetMediaCapturePipeline, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `media_pipeline_id`: The ID of the pipeline that you want to get. @@ -3685,9 +3909,12 @@ end get_meeting(meeting_id) get_meeting(meeting_id, params::Dict{String,<:Any}) - Gets the Amazon Chime SDK meeting details for the specified meeting ID. For more -information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime -SDK Developer Guide . + This API is is no longer supported and will not be updated. We recommend using the latest +version, GetMeeting, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. Gets the Amazon Chime SDK meeting +details for the specified meeting ID. For more information about the Amazon Chime SDK, see +Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide . # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. 
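
Every operation in these hunks is generated with the same pair of methods: one that takes only the required arguments and one that also accepts `params::Dict{String,<:Any}`, with the target account and Region controlled by the `aws_config` keyword. A sketch using get_meeting from the hunk above, assuming `global_aws_config` accepts a region keyword as in AWS.jl's documented setup; the meeting ID is a placeholder and the calls are left commented out:

using AWS
@service Chime

config = global_aws_config(; region="us-east-1")       # assumed configuration pattern
meeting_id = "00000000-0000-0000-0000-000000000000"    # placeholder
# Required-arguments method:
# Chime.get_meeting(meeting_id; aws_config=config)
# Same call with an explicit optional-parameter Dict:
# Chime.get_meeting(meeting_id, Dict{String,Any}(); aws_config=config)
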
@@ -3719,7 +3946,11 @@ end get_messaging_session_endpoint() get_messaging_session_endpoint(params::Dict{String,<:Any}) -The details of the endpoint for the messaging session. +The details of the endpoint for the messaging session. This API is is no longer supported +and will not be updated. We recommend using the latest version, +GetMessagingSessionEndpoint, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. """ function get_messaging_session_endpoint(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -3843,6 +4074,10 @@ end get_proxy_session(proxy_session_id, voice_connector_id, params::Dict{String,<:Any}) Gets the specified proxy session details for the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, GetProxySession, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `proxy_session_id`: The proxy session ID. @@ -3950,7 +4185,10 @@ end get_sip_media_application(sip_media_application_id, params::Dict{String,<:Any}) Retrieves the information for a SIP media application, including name, AWS Region, and -endpoints. +endpoints. This API is is no longer supported and will not be updated. We recommend using +the latest version, GetSipMediaApplication, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -3984,7 +4222,11 @@ end get_sip_media_application_logging_configuration(sip_media_application_id) get_sip_media_application_logging_configuration(sip_media_application_id, params::Dict{String,<:Any}) -Returns the logging configuration for the specified SIP media application. +Returns the logging configuration for the specified SIP media application. This API is is +no longer supported and will not be updated. We recommend using the latest version, +GetSipMediaApplicationLoggingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -4019,7 +4261,10 @@ end get_sip_rule(sip_rule_id, params::Dict{String,<:Any}) Retrieves the details of a SIP rule, such as the rule ID, name, triggers, and target -endpoints. +endpoints. This API is is no longer supported and will not be updated. We recommend using +the latest version, GetSipRule, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_rule_id`: The SIP rule ID. @@ -4124,7 +4369,11 @@ end get_voice_connector(voice_connector_id, params::Dict{String,<:Any}) Retrieves details for the specified Amazon Chime Voice Connector, such as timestamps,name, -outbound host, and encryption requirements. +outbound host, and encryption requirements. 
This API is is no longer supported and will +not be updated. We recommend using the latest version, GetVoiceConnector, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4159,7 +4408,11 @@ end get_voice_connector_emergency_calling_configuration(voice_connector_id, params::Dict{String,<:Any}) Gets the emergency calling configuration details for the specified Amazon Chime Voice -Connector. +Connector. This API is is no longer supported and will not be updated. We recommend using +the latest version, GetVoiceConnectorEmergencyCallingConfiguration, in the Amazon Chime +SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4194,7 +4447,11 @@ end get_voice_connector_group(voice_connector_group_id, params::Dict{String,<:Any}) Retrieves details for the specified Amazon Chime Voice Connector group, such as -timestamps,name, and associated VoiceConnectorItems. +timestamps,name, and associated VoiceConnectorItems. This API is is no longer supported +and will not be updated. We recommend using the latest version, GetVoiceConnectorGroup, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `voice_connector_group_id`: The Amazon Chime Voice Connector group ID. @@ -4229,7 +4486,11 @@ end get_voice_connector_logging_configuration(voice_connector_id, params::Dict{String,<:Any}) Retrieves the logging configuration details for the specified Amazon Chime Voice Connector. -Shows whether SIP message logs are enabled for sending to Amazon CloudWatch Logs. +Shows whether SIP message logs are enabled for sending to Amazon CloudWatch Logs. This +API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorLoggingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4264,6 +4525,10 @@ end get_voice_connector_origination(voice_connector_id, params::Dict{String,<:Any}) Retrieves origination setting details for the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorOrigination, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4297,7 +4562,11 @@ end get_voice_connector_proxy(voice_connector_id) get_voice_connector_proxy(voice_connector_id, params::Dict{String,<:Any}) -Gets the proxy configuration details for the specified Amazon Chime Voice Connector. +Gets the proxy configuration details for the specified Amazon Chime Voice Connector. 
This +API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorProxy, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime voice connector ID. @@ -4333,7 +4602,11 @@ end Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis. It also -shows the retention period, in hours, for the Amazon Kinesis data. +shows the retention period, in hours, for the Amazon Kinesis data. This API is is no +longer supported and will not be updated. We recommend using the latest version, +GetVoiceConnectorStreamingConfiguration, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4368,6 +4641,10 @@ end get_voice_connector_termination(voice_connector_id, params::Dict{String,<:Any}) Retrieves termination setting details for the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorTermination, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -4401,6 +4678,10 @@ end get_voice_connector_termination_health(voice_connector_id) get_voice_connector_termination_health(voice_connector_id, params::Dict{String,<:Any}) + This API is is no longer supported and will not be updated. We recommend using the latest +version, GetVoiceConnectorTerminationHealth, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. Retrieves information about the last time a SIP OPTIONS ping was received from your SIP infrastructure for the specified Amazon Chime Voice Connector. @@ -4506,7 +4787,11 @@ end list_app_instance_admins(app_instance_arn) list_app_instance_admins(app_instance_arn, params::Dict{String,<:Any}) -Returns a list of the administrators in the AppInstance. +Returns a list of the administrators in the AppInstance. This API is is no longer +supported and will not be updated. We recommend using the latest version, +ListAppInstanceAdmins, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_arn`: The ARN of the AppInstance. @@ -4545,7 +4830,11 @@ end list_app_instance_users(app-instance-arn) list_app_instance_users(app-instance-arn, params::Dict{String,<:Any}) -List all AppInstanceUsers created under a single AppInstance. +List all AppInstanceUsers created under a single AppInstance. This API is is no longer +supported and will not be updated. We recommend using the latest version, +ListAppInstanceUsers, in the Amazon Chime SDK. 
Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-arn`: The ARN of the AppInstance. @@ -4589,7 +4878,11 @@ end list_app_instances() list_app_instances(params::Dict{String,<:Any}) -Lists all Amazon Chime AppInstances created under a single AWS account. +Lists all Amazon Chime AppInstances created under a single AWS account. This API is no +longer supported and will not be updated. We recommend using the latest version, +ListAppInstances, in the Amazon Chime SDK. Using the latest version requires migrating to a +dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4618,7 +4911,9 @@ end list_attendee_tags(attendee_id, meeting_id) list_attendee_tags(attendee_id, meeting_id, params::Dict{String,<:Any}) -Lists the tags applied to an Amazon Chime SDK attendee resource. +Lists the tags applied to an Amazon Chime SDK attendee resource. ListAttendeeTags is not +supported in the Amazon Chime SDK Meetings Namespace. Update your application to remove +calls to this API. # Arguments - `attendee_id`: The Amazon Chime SDK attendee ID. @@ -4656,7 +4951,10 @@ end Lists the attendees for the specified Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer -Guide. +Guide. This API is no longer supported and will not be updated. We recommend using +the latest version, ListAttendees, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. @@ -4731,7 +5029,10 @@ end Lists all the users banned from a particular channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is no longer supported and will not be updated. We +recommend using the latest version, ListChannelBans, in the Amazon Chime SDK. Using the +latest version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -4771,7 +5072,10 @@ end Lists all channel memberships in a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is no longer supported and will not be updated. We recommend +using the latest version, ListChannelMemberships, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -4817,7 +5121,11 @@ end Lists all channels that a particular AppInstanceUser is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own. The x-amz-chime-bearer request header is mandatory.
Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, +ListChannelMembershipsForAppInstanceUser, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4858,7 +5166,11 @@ default, sorted by creation timestamp in descending order. Redacted messages ap results as empty, since they are only redacted, not deleted. Deleted messages do not appear in the results. This action always returns the latest version of an edited message. Also, the x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user -that makes the API call as the value in the header. +that makes the API call as the value in the header. This API is is no longer supported +and will not be updated. We recommend using the latest version, ListChannelMessages, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -4904,7 +5216,10 @@ end Lists all the moderators for a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in -the header. +the header. This API is is no longer supported and will not be updated. We recommend +using the latest version, ListChannelModerators, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -4949,7 +5264,10 @@ filters to narrow results. Functionality & restrictions Use privacy = PU retrieve all public channels in the account. Only an AppInstanceAdmin can set privacy = PRIVATE to list the private channels in an account. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, ListChannels, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app-instance-arn`: The ARN of the AppInstance. @@ -4996,7 +5314,11 @@ end A list of the channels moderated by an AppInstanceUser. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the -value in the header. +value in the header. This API is is no longer supported and will not be updated. We +recommend using the latest version, ListChannelsModeratedByAppInstanceUser, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5032,7 +5354,11 @@ end list_media_capture_pipelines() list_media_capture_pipelines(params::Dict{String,<:Any}) -Returns a list of media capture pipelines. +Returns a list of media capture pipelines. This API is is no longer supported and will +not be updated. We recommend using the latest version, ListMediaCapturePipelines, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5064,7 +5390,11 @@ end list_meeting_tags(meeting_id) list_meeting_tags(meeting_id, params::Dict{String,<:Any}) -Lists the tags applied to an Amazon Chime SDK meeting resource. +Lists the tags applied to an Amazon Chime SDK meeting resource. This API is is no longer +supported and will not be updated. We recommend using the latest version, +ListTagsForResource, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `meeting_id`: The Amazon Chime SDK meeting ID. @@ -5096,8 +5426,10 @@ end list_meetings() list_meetings(params::Dict{String,<:Any}) - Lists up to 100 active Amazon Chime SDK meetings. For more information about the Amazon -Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime SDK Developer Guide. +Lists up to 100 active Amazon Chime SDK meetings. ListMeetings is not supported in the +Amazon Chime SDK Meetings Namespace. Update your application to remove calls to this API. +For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the +Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5183,7 +5515,11 @@ end list_proxy_sessions(voice_connector_id) list_proxy_sessions(voice_connector_id, params::Dict{String,<:Any}) -Lists the proxy sessions for the specified Amazon Chime Voice Connector. +Lists the proxy sessions for the specified Amazon Chime Voice Connector. This API is is +no longer supported and will not be updated. We recommend using the latest version, +ListProxySessions, in the Amazon Chime SDK. Using the latest version requires migrating to +a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime voice connector ID. @@ -5302,7 +5638,11 @@ end list_sip_media_applications() list_sip_media_applications(params::Dict{String,<:Any}) -Lists the SIP media applications under the administrator's AWS account. +Lists the SIP media applications under the administrator's AWS account. This API is is no +longer supported and will not be updated. We recommend using the latest version, +ListSipMediaApplications, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -5334,7 +5674,11 @@ end list_sip_rules() list_sip_rules(params::Dict{String,<:Any}) -Lists the SIP rules under the administrator's AWS account. +Lists the SIP rules under the administrator's AWS account. This API is is no longer +supported and will not be updated. We recommend using the latest version, ListSipRules, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5397,7 +5741,12 @@ end list_tags_for_resource(arn) list_tags_for_resource(arn, params::Dict{String,<:Any}) -Lists the tags applied to an Amazon Chime SDK meeting resource. +Lists the tags applied to an Amazon Chime SDK meeting and messaging resources. This API +is is no longer supported and will not be updated. We recommend using the applicable latest +version in the Amazon Chime SDK. For meetings: ListTagsForResource. For messaging: +ListTagsForResource. Using the latest version requires migrating to a dedicated +namespace. For more information, refer to Migrating from the Amazon Chime namespace in the +Amazon Chime SDK Developer Guide. # Arguments - `arn`: The resource ARN. @@ -5468,7 +5817,11 @@ end list_voice_connector_groups() list_voice_connector_groups(params::Dict{String,<:Any}) -Lists the Amazon Chime Voice Connector groups for the administrator's AWS account. +Lists the Amazon Chime Voice Connector groups for the administrator's AWS account. This +API is is no longer supported and will not be updated. We recommend using the latest +version, ListVoiceConnectorGroups, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5499,7 +5852,11 @@ end list_voice_connector_termination_credentials(voice_connector_id) list_voice_connector_termination_credentials(voice_connector_id, params::Dict{String,<:Any}) -Lists the SIP credentials for the specified Amazon Chime Voice Connector. +Lists the SIP credentials for the specified Amazon Chime Voice Connector. This API is is +no longer supported and will not be updated. We recommend using the latest version, +ListVoiceConnectorTerminationCredentials, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -5533,7 +5890,11 @@ end list_voice_connectors() list_voice_connectors(params::Dict{String,<:Any}) -Lists the Amazon Chime Voice Connectors for the administrator's AWS account. +Lists the Amazon Chime Voice Connectors for the administrator's AWS account. This API is +is no longer supported and will not be updated. We recommend using the latest version, +ListVoiceConnectors, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -5595,7 +5956,11 @@ end put_app_instance_retention_settings(app_instance_retention_settings, app_instance_arn) put_app_instance_retention_settings(app_instance_retention_settings, app_instance_arn, params::Dict{String,<:Any}) -Sets the amount of time in days that a given AppInstance retains data. +Sets the amount of time in days that a given AppInstance retains data. This API is is no +longer supported and will not be updated. We recommend using the latest version, +PutAppInstanceRetentionSettings, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_retention_settings`: The time in days to retain data. Data type: number. @@ -5642,7 +6007,11 @@ end put_app_instance_streaming_configurations(app_instance_streaming_configurations, app_instance_arn) put_app_instance_streaming_configurations(app_instance_streaming_configurations, app_instance_arn, params::Dict{String,<:Any}) -The data streaming configurations of an AppInstance. +The data streaming configurations of an AppInstance. This API is is no longer supported +and will not be updated. We recommend using the latest version, +PutMessagingStreamingConfigurations, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `app_instance_streaming_configurations`: The streaming configurations set for an @@ -5783,7 +6152,11 @@ end put_sip_media_application_logging_configuration(sip_media_application_id) put_sip_media_application_logging_configuration(sip_media_application_id, params::Dict{String,<:Any}) -Updates the logging configuration for the specified SIP media application. +Updates the logging configuration for the specified SIP media application. This API is is +no longer supported and will not be updated. We recommend using the latest version, +PutSipMediaApplicationLoggingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -5823,7 +6196,11 @@ end Puts emergency calling configuration details to the specified Amazon Chime Voice Connector, such as emergency phone numbers and calling countries. Origination and termination settings must be enabled for the Amazon Chime Voice Connector before emergency calling can be -configured. +configured. This API is is no longer supported and will not be updated. We recommend +using the latest version, PutVoiceConnectorEmergencyCallingConfiguration, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `emergency_calling_configuration`: The emergency calling configuration details. @@ -5872,7 +6249,10 @@ end Adds a logging configuration for the specified Amazon Chime Voice Connector. The logging configuration specifies whether SIP message logs are enabled for sending to Amazon -CloudWatch Logs. +CloudWatch Logs. This API is is no longer supported and will not be updated. 
We recommend +using the latest version, PutVoiceConnectorLoggingConfiguration, in the Amazon Chime SDK. +Using the latest version requires migrating to a dedicated namespace. For more information, +refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `logging_configuration`: The logging configuration details to add. @@ -5919,7 +6299,11 @@ end Adds origination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to -turning off origination settings. +turning off origination settings. This API is is no longer supported and will not be +updated. We recommend using the latest version, PutVoiceConnectorOrigination, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `origination`: The origination setting details to add. @@ -5959,6 +6343,10 @@ end put_voice_connector_proxy(default_session_expiry_minutes, phone_number_pool_countries, voice_connector_id, params::Dict{String,<:Any}) Puts the specified proxy configuration to the specified Amazon Chime Voice Connector. +This API is is no longer supported and will not be updated. We recommend using the latest +version, PutVoiceConnectorProxy, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `default_session_expiry_minutes`: The default number of minutes allowed for proxy @@ -6020,7 +6408,11 @@ end Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to -Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data. +Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data. This +API is is no longer supported and will not be updated. We recommend using the latest +version, PutVoiceConnectorStreamingConfiguration, in the Amazon Chime SDK. Using the latest +version requires migrating to a dedicated namespace. For more information, refer to +Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `streaming_configuration`: The streaming configuration details to add. @@ -6067,7 +6459,11 @@ end Adds termination settings for the specified Amazon Chime Voice Connector. If emergency calling is configured for the Amazon Chime Voice Connector, it must be deleted prior to -turning off termination settings. +turning off termination settings. This API is is no longer supported and will not be +updated. We recommend using the latest version, PutVoiceConnectorTermination, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `termination`: The termination setting details to add. @@ -6106,7 +6502,11 @@ end put_voice_connector_termination_credentials(voice_connector_id) put_voice_connector_termination_credentials(voice_connector_id, params::Dict{String,<:Any}) -Adds termination SIP credentials for the specified Amazon Chime Voice Connector. 
+Adds termination SIP credentials for the specified Amazon Chime Voice Connector. This API +is is no longer supported and will not be updated. We recommend using the latest version, +PutVoiceConnectorTerminationCredentials, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `voice_connector_id`: The Amazon Chime Voice Connector ID. @@ -6146,7 +6546,10 @@ end Redacts message content, but not metadata. The message exists in the back end, but the action returns null content, and the state shows as redacted. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call -as the value in the header. +as the value in the header. This API is is no longer supported and will not be updated. +We recommend using the latest version, RedactChannelMessage, in the Amazon Chime SDK. Using +the latest version requires migrating to a dedicated namespace. For more information, refer +to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel containing the messages that you want to redact. @@ -6416,7 +6819,10 @@ Sends a message to a particular channel that the member is a part of. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. Also, STANDARD messages can contain 4KB of data and the 1KB of metadata. CONTROL messages can contain 30 bytes of data and no -metadata. +metadata. This API is is no longer supported and will not be updated. We recommend using +the latest version, SendChannelMessage, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `client_request_token`: The Idempotency token for each client request. @@ -6493,6 +6899,10 @@ parameter and which combinations are valid, refer to the StartStreamTranscriptio the Amazon Transcribe Developer Guide. Amazon Chime SDK live transcription is powered by Amazon Transcribe. Use of Amazon Transcribe is subject to the AWS Service Terms, including the terms specific to the AWS Machine Learning and Artificial Intelligence Services. +This API is is no longer supported and will not be updated. We recommend using the latest +version, StartMeetingTranscription, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `transcription_configuration`: The configuration for the current transcription operation. @@ -6538,7 +6948,11 @@ end stop_meeting_transcription(meeting_id) stop_meeting_transcription(meeting_id, params::Dict{String,<:Any}) -Stops transcription for the specified meetingId. +Stops transcription for the specified meetingId. This API is is no longer supported and +will not be updated. We recommend using the latest version, StopMeetingTranscription, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. 
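# A minimal sketch of the migration described in the StartMeetingTranscription and
# StopMeetingTranscription notices above, assuming the usual AWS.jl `@service` workflow and
# that the dedicated meetings namespace mirrors the legacy single-argument signature; the
# meeting ID is a placeholder.
using AWS
@service Chime_SDK_Meetings

meeting_id = "00000000-0000-0000-0000-000000000000"   # placeholder meeting ID
Chime_SDK_Meetings.stop_meeting_transcription(meeting_id)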
# Arguments
- `meeting_id`: The unique ID of the meeting for which you stop transcription.
@@ -6572,7 +6986,9 @@ end
    tag_attendee(tags, attendee_id, meeting_id)
    tag_attendee(tags, attendee_id, meeting_id, params::Dict{String,<:Any})
-Applies the specified tags to the specified Amazon Chime SDK attendee.
+Applies the specified tags to the specified Amazon Chime attendee. TagAttendee is not
+supported in the Amazon Chime SDK Meetings Namespace. Update your application to remove
+calls to this API.
# Arguments
- `tags`: The tag key-value pairs.
@@ -6611,7 +7027,11 @@ end
    tag_meeting(tags, meeting_id)
    tag_meeting(tags, meeting_id, params::Dict{String,<:Any})
-Applies the specified tags to the specified Amazon Chime SDK meeting.
+Applies the specified tags to the specified Amazon Chime SDK meeting. This API is no
+longer supported and will not be updated. We recommend using the latest version,
+TagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a
+dedicated namespace. For more information, refer to Migrating from the Amazon Chime
+namespace in the Amazon Chime SDK Developer Guide.
# Arguments
- `tags`: The tag key-value pairs.
@@ -6646,7 +7066,11 @@ end
    tag_resource(resource_arn, tags)
    tag_resource(resource_arn, tags, params::Dict{String,<:Any})
-Applies the specified tags to the specified Amazon Chime SDK meeting resource.
+Applies the specified tags to the specified Amazon Chime SDK meeting resource. This API
+is no longer supported and will not be updated. We recommend using the latest version,
+TagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a
+dedicated namespace. For more information, refer to Migrating from the Amazon Chime
+namespace in the Amazon Chime SDK Developer Guide.
# Arguments
- `resource_arn`: The resource ARN.
@@ -6687,7 +7111,9 @@ end
    untag_attendee(tag_keys, attendee_id, meeting_id)
    untag_attendee(tag_keys, attendee_id, meeting_id, params::Dict{String,<:Any})
-Untags the specified tags from the specified Amazon Chime SDK attendee.
+Untags the specified tags from the specified Amazon Chime SDK attendee. UntagAttendee is
+not supported in the Amazon Chime SDK Meetings Namespace. Update your application to remove
+calls to this API.
# Arguments
- `tag_keys`: The tag keys.
@@ -6726,7 +7152,11 @@ end
    untag_meeting(tag_keys, meeting_id)
    untag_meeting(tag_keys, meeting_id, params::Dict{String,<:Any})
-Untags the specified tags from the specified Amazon Chime SDK meeting.
+Untags the specified tags from the specified Amazon Chime SDK meeting. This API is no
+longer supported and will not be updated. We recommend using the latest version,
+UntagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a
+dedicated namespace. For more information, refer to Migrating from the Amazon Chime
+namespace in the Amazon Chime SDK Developer Guide.
# Arguments
- `tag_keys`: The tag keys.
@@ -6763,7 +7193,12 @@ end
    untag_resource(resource_arn, tag_keys)
    untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any})
-Untags the specified tags from the specified Amazon Chime SDK meeting resource.
+Untags the specified tags from the specified Amazon Chime SDK meeting resource. This API is no
+longer supported and will not be updated. We recommend using the latest version,
+UntagResource, in the Amazon Chime SDK. Using the latest version requires migrating to a
+dedicated namespace.
For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `resource_arn`: The resource ARN. @@ -6887,7 +7322,10 @@ end update_app_instance(name, app_instance_arn) update_app_instance(name, app_instance_arn, params::Dict{String,<:Any}) -Updates AppInstance metadata. +Updates AppInstance metadata. This API is is no longer supported and will not be updated. +We recommend using the latest version, UpdateAppInstance, in the Amazon Chime SDK. Using +the latest version requires migrating to a dedicated namespace. For more information, refer +to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name that you want to change. @@ -6927,7 +7365,11 @@ end update_app_instance_user(name, app_instance_user_arn) update_app_instance_user(name, app_instance_user_arn, params::Dict{String,<:Any}) -Updates the details of an AppInstanceUser. You can update names and metadata. +Updates the details of an AppInstanceUser. You can update names and metadata. This API is +is no longer supported and will not be updated. We recommend using the latest version, +UpdateAppInstanceUser, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the AppInstanceUser. @@ -7007,7 +7449,11 @@ end Update a channel's attributes. Restriction: You can't change a channel's privacy. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, UpdateChannel, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `mode`: The mode of the update request. @@ -7054,6 +7500,10 @@ end Updates the content of a message. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that makes the API call as the value in the header. +This API is is no longer supported and will not be updated. We recommend using the latest +version, UpdateChannelMessage, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. @@ -7096,7 +7546,11 @@ end The details of the time when a user last read messages in a channel. The x-amz-chime-bearer request header is mandatory. Use the AppInstanceUserArn of the user that -makes the API call as the value in the header. +makes the API call as the value in the header. This API is is no longer supported and +will not be updated. We recommend using the latest version, UpdateChannelReadMarker, in the +Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. For +more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime +SDK Developer Guide. # Arguments - `channel_arn`: The ARN of the channel. 
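# The Amazon Chime Voice Connector deprecation notices in this file follow the same pattern,
# pointing at the dedicated chime_sdk_voice namespace. A minimal sketch, assuming the usual
# AWS.jl `@service` workflow; both calls below take no required arguments.
using AWS
@service Chime
@service Chime_SDK_Voice

legacy_connectors = Chime.list_voice_connectors()              # frozen legacy namespace
current_connectors = Chime_SDK_Voice.list_voice_connectors()   # recommended replacement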
@@ -7239,7 +7693,11 @@ end update_proxy_session(capabilities, proxy_session_id, voice_connector_id) update_proxy_session(capabilities, proxy_session_id, voice_connector_id, params::Dict{String,<:Any}) -Updates the specified proxy session details, such as voice or SMS capabilities. +Updates the specified proxy session details, such as voice or SMS capabilities. This API +is is no longer supported and will not be updated. We recommend using the latest version, +UpdateProxySession, in the Amazon Chime SDK. Using the latest version requires migrating to +a dedicated namespace. For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `capabilities`: The proxy session capabilities. @@ -7368,7 +7826,11 @@ end update_sip_media_application(sip_media_application_id) update_sip_media_application(sip_media_application_id, params::Dict{String,<:Any}) -Updates the details of the specified SIP media application. +Updates the details of the specified SIP media application. This API is is no longer +supported and will not be updated. We recommend using the latest version, +UpdateSipMediaApplication, in the Amazon Chime SDK. Using the latest version requires +migrating to a dedicated namespace. For more information, refer to Migrating from the +Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `sip_media_application_id`: The SIP media application ID. @@ -7407,7 +7869,11 @@ end update_sip_media_application_call(arguments, sip_media_application_id, transaction_id, params::Dict{String,<:Any}) Invokes the AWS Lambda function associated with the SIP media application and transaction -ID in an update request. The Lambda function can then return a new set of actions. +ID in an update request. The Lambda function can then return a new set of actions. This +API is is no longer supported and will not be updated. We recommend using the latest +version, UpdateSipMediaApplicationCall, in the Amazon Chime SDK. Using the latest version +requires migrating to a dedicated namespace. For more information, refer to Migrating from +the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. # Arguments - `arguments`: Arguments made available to the Lambda function as part of the @@ -7452,7 +7918,11 @@ end update_sip_rule(name, sip_rule_id) update_sip_rule(name, sip_rule_id, params::Dict{String,<:Any}) -Updates the details of the specified SIP rule. +Updates the details of the specified SIP rule. This API is is no longer supported and +will not be updated. We recommend using the latest version, UpdateSipRule, in the Amazon +Chime SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `name`: The new name for the specified SIP rule. @@ -7573,7 +8043,11 @@ end update_voice_connector(name, require_encryption, voice_connector_id) update_voice_connector(name, require_encryption, voice_connector_id, params::Dict{String,<:Any}) -Updates details for the specified Amazon Chime Voice Connector. +Updates details for the specified Amazon Chime Voice Connector. This API is is no longer +supported and will not be updated. We recommend using the latest version, +UpdateVoiceConnector, in the Amazon Chime SDK. Using the latest version requires migrating +to a dedicated namespace. 
For more information, refer to Migrating from the Amazon Chime +namespace in the Amazon Chime SDK Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector. @@ -7623,7 +8097,11 @@ end update_voice_connector_group(name, voice_connector_items, voice_connector_group_id, params::Dict{String,<:Any}) Updates details of the specified Amazon Chime Voice Connector group, such as the name and -Amazon Chime Voice Connector priority ranking. +Amazon Chime Voice Connector priority ranking. This API is is no longer supported and +will not be updated. We recommend using the latest version, UpdateVoiceConnectorGroup, in +the Amazon Chime SDK. Using the latest version requires migrating to a dedicated namespace. +For more information, refer to Migrating from the Amazon Chime namespace in the Amazon +Chime SDK Developer Guide. # Arguments - `name`: The name of the Amazon Chime Voice Connector group. @@ -7676,7 +8154,11 @@ end Validates an address to be used for 911 calls made with Amazon Chime Voice Connectors. You can use validated addresses in a Presence Information Data Format Location Object file that you include in SIP requests. That helps ensure that addresses are routed to the appropriate -Public Safety Answering Point. +Public Safety Answering Point. This API is is no longer supported and will not be +updated. We recommend using the latest version, ValidateE911Address, in the Amazon Chime +SDK. Using the latest version requires migrating to a dedicated namespace. For more +information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK +Developer Guide. # Arguments - `aws_account_id`: The AWS account ID. diff --git a/src/services/chime_sdk_identity.jl b/src/services/chime_sdk_identity.jl index 50e09d6fa8..4bf90ed009 100644 --- a/src/services/chime_sdk_identity.jl +++ b/src/services/chime_sdk_identity.jl @@ -1145,6 +1145,9 @@ Updates the name and metadata of an AppInstanceBot. - `name`: The name of the AppInstanceBot. - `app_instance_bot_arn`: The ARN of the AppInstanceBot. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Configuration"`: The configuration for the bot update. """ function update_app_instance_bot( Metadata, Name, appInstanceBotArn; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/chime_sdk_media_pipelines.jl b/src/services/chime_sdk_media_pipelines.jl index 57bcfebc07..58cc9281eb 100644 --- a/src/services/chime_sdk_media_pipelines.jl +++ b/src/services/chime_sdk_media_pipelines.jl @@ -149,7 +149,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MediaInsightsRuntimeMetadata"`: The runtime metadata for the media insights pipeline. Consists of a key-value map of strings. - `"S3RecordingSinkRuntimeConfiguration"`: The runtime configuration for the S3 recording - sink. + sink. If specified, the settings in this structure override any settings in + S3RecordingSinkConfiguration. - `"Tags"`: The tags assigned to the media insights pipeline. 
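# The S3RecordingSinkRuntimeConfiguration note above says runtime settings override the
# configuration's own S3 sink settings. A sketch of what that optional parameter might look
# like when passed in the trailing params dictionary; the field names and values are
# assumptions drawn from the service model, not something this diff shows.
create_pipeline_params = Dict{String,Any}(
    "S3RecordingSinkRuntimeConfiguration" => Dict{String,Any}(
        "Destination" => "arn:aws:s3:::example-recordings-bucket",  # placeholder bucket ARN
        "RecordingFileFormat" => "Wav",
    ),
)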
""" function create_media_insights_pipeline( diff --git a/src/services/chime_sdk_messaging.jl b/src/services/chime_sdk_messaging.jl index d8daa84d7f..296c8e4820 100644 --- a/src/services/chime_sdk_messaging.jl +++ b/src/services/chime_sdk_messaging.jl @@ -132,10 +132,10 @@ end channel_flow_callback(callback_id, channel_message, channel_arn) channel_flow_callback(callback_id, channel_message, channel_arn, params::Dict{String,<:Any}) -Calls back Chime SDK Messaging with a processing response message. This should be invoked -from the processor Lambda. This is a developer API. You can return one of the following -processing responses: Update message content or metadata Deny a message Make no -changes to the message +Calls back Amazon Chime SDK messaging with a processing response message. This should be +invoked from the processor Lambda. This is a developer API. You can return one of the +following processing responses: Update message content or metadata Deny a message +Make no changes to the message # Arguments - `callback_id`: The identifier passed to the processor by the service when invoked. Use @@ -341,7 +341,7 @@ channel flows with channels, and the processors in the channel flow then take ac messages sent to that channel. This is a developer API. Channel flows process the following items: New and updated messages Persistent and non-persistent messages The Standard message type Channel flows don't process Control or System messages. For more -information about the message types provided by Chime SDK Messaging, refer to Message types +information about the message types provided by Chime SDK messaging, refer to Message types in the Amazon Chime developer guide. # Arguments @@ -1329,8 +1329,8 @@ end get_channel_membership_preferences(channel_arn, member_arn, x-amz-chime-bearer, params::Dict{String,<:Any}) Gets the membership preferences of an AppInstanceUser or AppInstanceBot for the specified -channel. A user or a bot must be a member of the channel and own the membership to be able -to retrieve membership preferences. Users or bots in the AppInstanceAdmin and channel +channel. A user or a bot must be a member of the channel and own the membership in order to +retrieve membership preferences. Users or bots in the AppInstanceAdmin and channel moderator roles can't retrieve preferences for other users or bots. Banned users or bots can't retrieve membership preferences for the channel from which they are banned. The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or @@ -1453,11 +1453,10 @@ status of messages going through channel flow processing. The API provides an al to retrieving message status if the event was not received because a client wasn't connected to a websocket. Messages can have any one of these statuses. SENT Message processed successfully PENDING Ongoing processing FAILED Processing failed DENIED -Messasge denied by the processor This API does not return statuses for denied -messages, because we don't store them once the processor denies them. Only the message -sender can invoke this API. The x-amz-chime-bearer request header is mandatory. Use the -ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the -header. +Message denied by the processor This API does not return statuses for denied messages, +because we don't store them once the processor denies them. Only the message sender can +invoke this API. The x-amz-chime-bearer request header is mandatory. 
Use the ARN of the +AppInstanceUser or AppInstanceBot that makes the API call as the value in the header. # Arguments - `channel_arn`: The ARN of the channel @@ -1739,7 +1738,7 @@ end list_channel_memberships_for_app_instance_user(x-amz-chime-bearer) list_channel_memberships_for_app_instance_user(x-amz-chime-bearer, params::Dict{String,<:Any}) - Lists all channels that anr AppInstanceUser or AppInstanceBot is a part of. Only an + Lists all channels that an AppInstanceUser or AppInstanceBot is a part of. Only an AppInstanceAdmin can call the API with a user ARN that is not their own. The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header. @@ -2210,13 +2209,13 @@ end put_channel_membership_preferences(preferences, channel_arn, member_arn, x-amz-chime-bearer) put_channel_membership_preferences(preferences, channel_arn, member_arn, x-amz-chime-bearer, params::Dict{String,<:Any}) -Sets the membership preferences of an AppInstanceUser or AppIntanceBot for the specified +Sets the membership preferences of an AppInstanceUser or AppInstanceBot for the specified channel. The user or bot must be a member of the channel. Only the user or bot who owns the membership can set preferences. Users or bots in the AppInstanceAdmin and channel moderator -roles can't set preferences for other users or users. Banned users or bots can't set -membership preferences for the channel from which they are banned. The x-amz-chime-bearer -request header is mandatory. Use the ARN of an AppInstanceUser or AppInstanceBot that makes -the API call as the value in the header. +roles can't set preferences for other users. Banned users or bots can't set membership +preferences for the channel from which they are banned. The x-amz-chime-bearer request +header is mandatory. Use the ARN of an AppInstanceUser or AppInstanceBot that makes the API +call as the value in the header. # Arguments - `preferences`: The channel membership preferences of an AppInstanceUser . @@ -2424,15 +2423,19 @@ end Sends a message to a particular channel that the member is a part of. The x-amz-chime-bearer request header is mandatory. Use the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the value in the header. Also, STANDARD messages -can contain 4KB of data and the 1KB of metadata. CONTROL messages can contain 30 bytes of -data and no metadata. +can be up to 4KB in size and contain metadata. Metadata is arbitrary, and you can use it in +a variety of ways, such as containing a link to an attachment. CONTROL messages are +limited to 30 bytes and do not contain metadata. # Arguments - `client_request_token`: The Idempotency token for each client request. -- `content`: The content of the message. +- `content`: The content of the channel message. - `persistence`: Boolean that controls whether the message is persisted on the back end. Required. -- `type`: The type of message, STANDARD or CONTROL. +- `type`: The type of message, STANDARD or CONTROL. STANDARD messages can be up to 4KB in + size and contain metadata. Metadata is arbitrary, and you can use it in a variety of ways, + such as containing a link to an attachment. CONTROL messages are limited to 30 bytes and + do not contain metadata. - `channel_arn`: The ARN of the channel. - `x-amz-chime-bearer`: The ARN of the AppInstanceUser or AppInstanceBot that makes the API call. @@ -2445,6 +2448,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"Metadata"`: The optional metadata for each message. - `"PushNotification"`: The push notification configuration of the message. - `"SubChannelId"`: The ID of the SubChannel in the request. +- `"Target"`: The target of a message. Must be a member of the channel, such as another + user, a bot, or the sender. Only the target and the sender can view targeted messages. Only + users who can see targeted messages can take actions on them. However, administrators can + delete targeted messages that they can’t see. """ function send_channel_message( ClientRequestToken, @@ -2693,7 +2700,7 @@ the ARN of the AppInstanceUser or AppInstanceBot that makes the API call as the the header. # Arguments -- `content`: The content of the message being updated. +- `content`: The content of the channel message. - `channel_arn`: The ARN of the channel. - `message_id`: The ID string of the message being updated. - `x-amz-chime-bearer`: The ARN of the AppInstanceUser or AppInstanceBot that makes the API diff --git a/src/services/cleanrooms.jl b/src/services/cleanrooms.jl index e1544eef42..30d5d62b68 100644 --- a/src/services/cleanrooms.jl +++ b/src/services/cleanrooms.jl @@ -134,7 +134,7 @@ Creates a new configured table resource. - `analysis_method`: The analysis method for the configured tables. The only valid value is currently `DIRECT_QUERY`. - `name`: The name of the configured table. -- `table_reference`: A reference to the AWS Glue table being configured. +- `table_reference`: A reference to the Glue table being configured. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1189,7 +1189,7 @@ end start_protected_query(membership_identifier, result_configuration, sql_parameters, type) start_protected_query(membership_identifier, result_configuration, sql_parameters, type, params::Dict{String,<:Any}) -Creates a protected query that is started by AWS Clean Rooms. +Creates a protected query that is started by Clean Rooms . # Arguments - `membership_identifier`: A unique identifier for the membership to run this query diff --git a/src/services/cloudcontrol.jl b/src/services/cloudcontrol.jl index dc2e1bff51..35d608ec04 100644 --- a/src/services/cloudcontrol.jl +++ b/src/services/cloudcontrol.jl @@ -54,18 +54,12 @@ using the RequestToken of the ProgressEvent type returned by CreateResource. # Arguments - `desired_state`: Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values. Cloud Control API - currently supports JSON as a structured data format. <p>Specify the desired state - as one of the following:</p> <ul> <li> <p>A JSON blob</p> - </li> <li> <p>A local path containing the desired state in JSON data - format</p> </li> </ul> <p>For more information, see <a - href="https://docs.aws.amazon.com/cloudcontrolapi/latest/userguide/resource-operations - -create.html#resource-operations-create-desiredstate">Composing the desired state - of the resource</a> in the <i>Amazon Web Services Cloud Control API User - Guide</i>.</p> <p>For more information about the properties of a specific - resource, refer to the related topic for the resource in the <a - href="https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resou - rce-type-ref.html">Resource and property types reference</a> in the - <i>CloudFormation Users Guide</i>.</p> + currently supports JSON as a structured data format. 
Specify the desired state as one of + the following: A JSON blob A local path containing the desired state in JSON data + format For more information, see Composing the desired state of the resource in the + Amazon Web Services Cloud Control API User Guide. For more information about the properties + of a specific resource, refer to the related topic for the resource in the Resource and + property types reference in the CloudFormation Users Guide. - `type_name`: The name of the resource type. # Optional Parameters diff --git a/src/services/cloudformation.jl b/src/services/cloudformation.jl index 169183f478..163e6b8dd0 100644 --- a/src/services/cloudformation.jl +++ b/src/services/cloudformation.jl @@ -342,6 +342,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"NotificationARNs"`: The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list. +- `"OnStackFailure"`: Determines what action will be taken if stack creation fails. If this + parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation + must not be specified. This must be one of these values: DELETE - Deletes the change set + if the stack creation fails. This is only valid when the ChangeSetType parameter is set to + CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED. + DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true + for the DisableRollback parameter to the ExecuteChangeSet API operation. ROLLBACK - if + the stack creation fails, roll back the stack. This is equivalent to specifying false for + the DisableRollback parameter to the ExecuteChangeSet API operation. For nested stacks, + when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, + any failure in a child stack will cause the parent stack creation to fail and all stacks to + be deleted. - `"Parameters"`: A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type. - `"ResourceTypes"`: The template resource types that you have permissions to work with if @@ -495,6 +507,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management. +- `"RetainExceptOnCreate"`: This deletion policy deletes newly created resources, but + retains existing resources, when a stack operation is rolled back. This ensures new, empty, + and unused resources are deleted, while critical resources and their data are retained. + RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy + attribute. - `"RoleARN"`: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all @@ -2044,7 +2061,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that CloudFormation successfully received them. 
- `"DisableRollback"`: Preserves the state of previously provisioned resources when an - operation fails. Default: True + operation fails. This parameter can't be specified when the OnStackFailure parameter to the + CreateChangeSet API operation was specified. True - if the stack creation fails, do + nothing. This is equivalent to specifying DO_NOTHING for the OnStackFailure parameter to + the CreateChangeSet API operation. False - if the stack creation fails, roll back the + stack. This is equivalent to specifying ROLLBACK for the OnStackFailure parameter to the + CreateChangeSet API operation. Default: True +- `"RetainExceptOnCreate"`: This deletion policy deletes newly created resources, but + retains existing resources, when a stack operation is rolled back. This ensures new, empty, + and unused resources are deleted, while critical resources and their data are retained. + RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy + attribute. - `"StackName"`: If you specified the name of a change set, specify the stack name or Amazon Resource Name (ARN) that's associated with the change set you want to execute. """ @@ -2179,6 +2206,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys and a maximum length of 51,200 bytes. For more information about templates, see Template anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. +- `"TemplateSummaryConfig"`: Specifies options for the GetTemplateSummary API action. - `"TemplateURL"`: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template anatomy in the @@ -2365,6 +2393,93 @@ function list_imports( ) end +""" + list_stack_instance_resource_drifts(operation_id, stack_instance_account, stack_instance_region, stack_set_name) + list_stack_instance_resource_drifts(operation_id, stack_instance_account, stack_instance_region, stack_set_name, params::Dict{String,<:Any}) + +Returns drift information for resources in a stack instance. +ListStackInstanceResourceDrifts returns drift information for the most recent drift +detection operation. If an operation is in progress, it may only return partial results. + +# Arguments +- `operation_id`: The unique ID of the drift operation. +- `stack_instance_account`: The name of the Amazon Web Services account that you want to + list resource drifts for. +- `stack_instance_region`: The name of the Region where you want to list resource drifts. +- `stack_set_name`: The name or unique ID of the stack set that you want to list drifted + resources for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CallAs"`: [Service-managed permissions] Specifies whether you are acting as an account + administrator in the organization's management account or as a delegated administrator in a + member account. By default, SELF is specified. Use SELF for stack sets with self-managed + permissions. If you are signed in to the management account, specify SELF. If you are + signed in to a delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web + Services account must be registered as a delegated administrator in the management account. 
+ For more information, see Register a delegated administrator in the CloudFormation User + Guide. +- `"MaxResults"`: The maximum number of results to be returned with a single call. If the + number of available results exceeds this maximum, the response includes a NextToken value + that you can assign to the NextToken request parameter to get the next set of results. +- `"NextToken"`: If the previous paginated request didn't return all of the remaining + results, the response object's NextToken parameter value is set to a token. To retrieve the + next set of results, call this action again and assign that token to the request object's + NextToken parameter. If there are no remaining results, the previous response object's + NextToken parameter is set to null. +- `"StackInstanceResourceDriftStatuses"`: The resource drift status of the stack instance. + DELETED: The resource differs from its expected template configuration in that the + resource has been deleted. MODIFIED: One or more resource properties differ from their + expected template values. IN_SYNC: The resource's actual configuration matches its + expected template configuration. NOT_CHECKED: CloudFormation doesn't currently return + this value. +""" +function list_stack_instance_resource_drifts( + OperationId, + StackInstanceAccount, + StackInstanceRegion, + StackSetName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "ListStackInstanceResourceDrifts", + Dict{String,Any}( + "OperationId" => OperationId, + "StackInstanceAccount" => StackInstanceAccount, + "StackInstanceRegion" => StackInstanceRegion, + "StackSetName" => StackSetName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_stack_instance_resource_drifts( + OperationId, + StackInstanceAccount, + StackInstanceRegion, + StackSetName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cloudformation( + "ListStackInstanceResourceDrifts", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "OperationId" => OperationId, + "StackInstanceAccount" => StackInstanceAccount, + "StackInstanceRegion" => StackInstanceRegion, + "StackSetName" => StackSetName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_stack_instances(stack_set_name) list_stack_instances(stack_set_name, params::Dict{String,<:Any}) @@ -3029,6 +3144,11 @@ UPDATE_ROLLBACK_COMPLETE IMPORT_COMPLETE IMPORT_ROLLBACK_COMPLETE # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for this RollbackStack request. +- `"RetainExceptOnCreate"`: This deletion policy deletes newly created resources, but + retains existing resources, when a stack operation is rolled back. This ensures new, empty, + and unused resources are deleted, while critical resources and their data are retained. + RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy + attribute. - `"RoleARN"`: The Amazon Resource Name (ARN) of an Identity and Access Management role that CloudFormation assumes to rollback the stack. """ @@ -3440,6 +3560,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management. 
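# A minimal sketch of the new CloudFormation options introduced above, assuming the usual
# AWS.jl `@service` workflow; the stack, change set, template URL, account, and operation
# identifiers are placeholders, and the create_change_set/execute_change_set argument order
# follows the generated wrappers' conventions.
using AWS
@service CloudFormation

# OnStackFailure=DELETE is only valid with ChangeSetType=CREATE; pair it with
# RetainExceptOnCreate at execution time instead of DisableRollback.
CloudFormation.create_change_set(
    "my-change-set",
    "my-new-stack",
    Dict{String,Any}(
        "ChangeSetType" => "CREATE",
        "OnStackFailure" => "DELETE",
        "TemplateURL" => "https://example-bucket.s3.amazonaws.com/template.yaml",  # placeholder
    ),
)
CloudFormation.execute_change_set(
    "my-change-set",
    Dict{String,Any}("StackName" => "my-new-stack", "RetainExceptOnCreate" => true),
)

# Inspect per-instance drift from a stack set drift-detection operation with the new API.
CloudFormation.list_stack_instance_resource_drifts(
    "d0a825a0-example",   # placeholder OperationId from DetectStackSetDrift
    "111122223333",       # stack instance account
    "us-east-1",          # stack instance Region
    "my-stack-set",
)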
+- `"RetainExceptOnCreate"`: This deletion policy deletes newly created resources, but + retains existing resources, when a stack operation is rolled back. This ensures new, empty, + and unused resources are deleted, while critical resources and their data are retained. + RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy + attribute. - `"RoleARN"`: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all diff --git a/src/services/cloudfront.jl b/src/services/cloudfront.jl index 9592ea1e62..88dca2d092 100644 --- a/src/services/cloudfront.jl +++ b/src/services/cloudfront.jl @@ -59,7 +59,8 @@ distribution. A staging distribution is a copy of an existing distribution (call primary distribution) that you can use in a continuous deployment workflow. After you create a staging distribution, you can use UpdateDistribution to modify the staging distribution's configuration. Then you can use CreateContinuousDeploymentPolicy to -incrementally move traffic to the staging distribution. +incrementally move traffic to the staging distribution. This API operation requires the +following IAM permissions: GetDistribution CreateDistribution CopyDistribution # Arguments - `caller_reference`: A value that uniquely identifies a request to create a resource. This @@ -70,6 +71,10 @@ incrementally move traffic to the staging distribution. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Enabled"`: A Boolean flag to specify the state of the staging distribution when it's + created. When you set this value to True, the staging distribution is enabled. When you set + this value to False, the staging distribution is disabled. If you omit this field, the + default value is True. - `"If-Match"`: The version identifier of the primary distribution whose configuration you are copying. This is the ETag value returned in the response to GetDistribution and GetDistributionConfig. @@ -305,7 +310,8 @@ end create_distribution_with_tags2020_05_31(distribution_config_with_tags) create_distribution_with_tags2020_05_31(distribution_config_with_tags, params::Dict{String,<:Any}) -Create a new distribution with tags. +Create a new distribution with tags. This API operation requires the following IAM +permissions: CreateDistribution TagResource # Arguments - `distribution_config_with_tags`: The distribution's configuration information. @@ -3998,7 +4004,8 @@ distribution. After using a continuous deployment policy to move a portion of yo name's traffic to the staging distribution and verifying that it works as intended, you can use this operation to copy the staging distribution's configuration to the primary distribution. This action will disable the continuous deployment policy and move your -domain's traffic back to the primary distribution. +domain's traffic back to the primary distribution. 
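# A minimal sketch of the CopyDistribution additions above (the new Enabled flag and the
# If-Match parameter), assuming the usual AWS.jl `@service` workflow; the 2020_05_31-suffixed
# method name is inferred from the neighboring CloudFront wrappers, and the distribution ID,
# ETag, and caller reference are placeholders.
using AWS
@service CloudFront

CloudFront.copy_distribution2020_05_31(
    "copy-example-2023-08-01",   # CallerReference: must be unique per request
    "E1A2B3C4EXAMPLE",           # ID of the primary distribution being copied
    Dict{String,Any}(
        "Enabled" => false,              # create the staging copy in a disabled state
        "If-Match" => "E2QWRUHEXAMPLE",  # ETag of the primary distribution's configuration
    ),
)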
This API operation requires the +following IAM permissions: GetDistribution UpdateDistribution # Arguments - `id`: The identifier of the primary distribution to which you are copying a staging diff --git a/src/services/cloudwatch_logs.jl b/src/services/cloudwatch_logs.jl index 54c990e424..78f92e1633 100644 --- a/src/services/cloudwatch_logs.jl +++ b/src/services/cloudwatch_logs.jl @@ -5,51 +5,76 @@ using AWS.Compat using AWS.UUIDs """ - associate_kms_key(kms_key_id, log_group_name) - associate_kms_key(kms_key_id, log_group_name, params::Dict{String,<:Any}) - -Associates the specified KMS key with the specified log group. Associating a KMS key with a -log group overrides any existing associations between the log group and a KMS key. After a -KMS key is associated with a log group, all newly ingested data for the log group is -encrypted using the KMS key. This association is stored as long as the data encrypted with -the KMS keyis still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this -data whenever it is requested. CloudWatch Logs supports only symmetric KMS keys. Do not -use an associate an asymmetric KMS key with your log group. For more information, see Using -Symmetric and Asymmetric Keys. It can take up to 5 minutes for this operation to take -effect. If you attempt to associate a KMS key with a log group but the KMS key does not -exist or the KMS key is disabled, you receive an InvalidParameterException error. + associate_kms_key(kms_key_id) + associate_kms_key(kms_key_id, params::Dict{String,<:Any}) + +Associates the specified KMS key with either one log group in the account, or with all +stored CloudWatch Logs query insights results in the account. When you use AssociateKmsKey, +you specify either the logGroupName parameter or the resourceIdentifier parameter. You +can't specify both of those parameters in the same operation. Specify the logGroupName +parameter to cause all log events stored in the log group to be encrypted with that key. +Only the log events ingested after the key is associated are encrypted with that key. +Associating a KMS key with a log group overrides any existing associations between the log +group and a KMS key. After a KMS key is associated with a log group, all newly ingested +data for the log group is encrypted using the KMS key. This association is stored as long +as the data encrypted with the KMS key is still within CloudWatch Logs. This enables +CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log +group does not cause the results of queries of that log group to be encrypted with that +key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey +operation with the resourceIdentifier parameter that specifies a query-result resource. +Specify the resourceIdentifier parameter with a query-result resource, to use that key to +encrypt the stored results of all future StartQuery operations in the account. The response +from a GetQueryResults operation will still return the query results in plain text. Even if +you have not associated a key with your query results, the query results are encrypted when +stored, using the default CloudWatch Logs method. If you run a query from a monitoring +account that queries logs in a source account, the query results key from the monitoring +account, if any, is used. 
If you delete the key that is used to encrypt log events or +log group query results, then all the associated stored log events or query results that +were encrypted with that key will be unencryptable and unusable. CloudWatch Logs supports +only symmetric KMS keys. Do not use an associate an asymmetric KMS key with your log group +or query results. For more information, see Using Symmetric and Asymmetric Keys. It can +take up to 5 minutes for this operation to take effect. If you attempt to associate a KMS +key with a log group but the KMS key does not exist or the KMS key is disabled, you receive +an InvalidParameterException error. # Arguments - `kms_key_id`: The Amazon Resource Name (ARN) of the KMS key to use when encrypting log data. This must be a symmetric KMS key. For more information, see Amazon Resource Names and Using Symmetric and Asymmetric Keys. -- `log_group_name`: The name of the log group. -""" -function associate_kms_key( - kmsKeyId, logGroupName; aws_config::AbstractAWSConfig=global_aws_config() -) +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"logGroupName"`: The name of the log group. In your AssociateKmsKey operation, you must + specify either the resourceIdentifier parameter or the logGroup parameter, but you can't + specify both. +- `"resourceIdentifier"`: Specifies the target for this operation. You must specify one of + the following: Specify the following ARN to have future GetQueryResults operations in + this account encrypt the results with the specified KMS key. Replace REGION and ACCOUNT_ID + with your Region and account ID. arn:aws:logs:REGION:ACCOUNT_ID:query-result:* Specify + the ARN of a log group to have CloudWatch Logs use the KMS key to encrypt log events that + are ingested and stored by that log group. The log group ARN must be in the following + format. Replace REGION and ACCOUNT_ID with your Region and account ID. + arn:aws:logs:REGION:ACCOUNT_ID:log-group:LOG_GROUP_NAME In your AssociateKmsKey + operation, you must specify either the resourceIdentifier parameter or the logGroup + parameter, but you can't specify both. +""" +function associate_kms_key(kmsKeyId; aws_config::AbstractAWSConfig=global_aws_config()) return cloudwatch_logs( "AssociateKmsKey", - Dict{String,Any}("kmsKeyId" => kmsKeyId, "logGroupName" => logGroupName); + Dict{String,Any}("kmsKeyId" => kmsKeyId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function associate_kms_key( kmsKeyId, - logGroupName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cloudwatch_logs( "AssociateKmsKey", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("kmsKeyId" => kmsKeyId, "logGroupName" => logGroupName), - params, - ), + mergewith(_merge, Dict{String,Any}("kmsKeyId" => kmsKeyId), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1047,41 +1072,53 @@ function describe_subscription_filters( end """ - disassociate_kms_key(log_group_name) - disassociate_kms_key(log_group_name, params::Dict{String,<:Any}) + disassociate_kms_key() + disassociate_kms_key(params::Dict{String,<:Any}) -Disassociates the associated KMS key from the specified log group. After the KMS key is -disassociated from the log group, CloudWatch Logs stops encrypting newly ingested data for -the log group. All previously ingested data remains encrypted, and CloudWatch Logs requires -permissions for the KMS key whenever the encrypted data is requested. 
Note that it can take -up to 5 minutes for this operation to take effect. - -# Arguments -- `log_group_name`: The name of the log group. +Disassociates the specified KMS key from the specified log group or from all CloudWatch +Logs Insights query results in the account. When you use DisassociateKmsKey, you specify +either the logGroupName parameter or the resourceIdentifier parameter. You can't specify +both of those parameters in the same operation. Specify the logGroupName parameter to +stop using the KMS key to encrypt future log events ingested and stored in the log group. +Instead, they will be encrypted with the default CloudWatch Logs method. The log events +that were ingested while the key was associated with the log group are still encrypted with +that key. Therefore, CloudWatch Logs will need permissions for the key whenever that data +is accessed. Specify the resourceIdentifier parameter with the query-result resource to +stop using the KMS key to encrypt the results of all future StartQuery operations in the +account. They will instead be encrypted with the default CloudWatch Logs method. The +results from queries that ran while the key was associated with the account are still +encrypted with that key. Therefore, CloudWatch Logs will need permissions for the key +whenever that data is accessed. It can take up to 5 minutes for this operation to take +effect. -""" -function disassociate_kms_key( - logGroupName; aws_config::AbstractAWSConfig=global_aws_config() -) - return cloudwatch_logs( - "DisassociateKmsKey", - Dict{String,Any}("logGroupName" => logGroupName); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"logGroupName"`: The name of the log group. In your DisassociateKmsKey operation, you + must specify either the resourceIdentifier parameter or the logGroup parameter, but you + can't specify both. +- `"resourceIdentifier"`: Specifies the target for this operation. You must specify one of + the following: Specify the ARN of a log group to stop having CloudWatch Logs use the KMS + key to encrypt log events that are ingested and stored by that log group. After you run + this operation, CloudWatch Logs encrypts ingested log events with the default CloudWatch + Logs method. The log group ARN must be in the following format. Replace REGION and + ACCOUNT_ID with your Region and account ID. + arn:aws:logs:REGION:ACCOUNT_ID:log-group:LOG_GROUP_NAME Specify the following ARN to + stop using this key to encrypt the results of future StartQuery operations in this account. + Replace REGION and ACCOUNT_ID with your Region and account ID. + arn:aws:logs:REGION:ACCOUNT_ID:query-result:* In your DisssociateKmsKey operation, you + must specify either the resourceIdentifier parameter or the logGroup parameter, but you + can't specify both. 
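For illustration only, a minimal usage sketch of the updated AssociateKmsKey/DisassociateKmsKey signatures through AWS.jl's high-level `@service` interface; the module name, ARNs, and log group name below are placeholders rather than values taken from this change:

```julia
using AWS: @service
@service Cloudwatch_Logs   # assumed module name; resolves to src/services/cloudwatch_logs.jl

kms_key_arn = "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"   # placeholder key ARN

# Target a single log group: future log events ingested into it are encrypted with the key.
Cloudwatch_Logs.associate_kms_key(kms_key_arn, Dict("logGroupName" => "my-log-group"))

# Or target stored query results account-wide via the query-result resource ARN.
Cloudwatch_Logs.associate_kms_key(
    kms_key_arn,
    Dict("resourceIdentifier" => "arn:aws:logs:us-east-1:111122223333:query-result:*"),
)

# Later, stop using the key for that log group; each call names exactly one target.
Cloudwatch_Logs.disassociate_kms_key(Dict("logGroupName" => "my-log-group"))
```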
+""" +function disassociate_kms_key(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudwatch_logs( + "DisassociateKmsKey"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function disassociate_kms_key( - logGroupName, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return cloudwatch_logs( - "DisassociateKmsKey", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("logGroupName" => logGroupName), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "DisassociateKmsKey", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end @@ -1279,10 +1316,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the ARN. You must include either logGroupIdentifier or logGroupName, but not both. - `"logGroupName"`: The name of the log group to search. You must include either logGroupIdentifier or logGroupName, but not both. -- `"time"`: The time to set as the center of the query. If you specify time, the 15 minutes - before this time are queries. If you omit time, the 8 minutes before and 8 minutes after - this time are searched. The time value is specified as epoch time, which is the number of - seconds since January 1, 1970, 00:00:00 UTC. +- `"time"`: The time to set as the center of the query. If you specify time, the 8 minutes + before and 8 minutes after this time are searched. If you omit time, the most recent 15 + minutes up to the current time are searched. The time value is specified as epoch time, + which is the number of seconds since January 1, 1970, 00:00:00 UTC. """ function get_log_group_fields(; aws_config::AbstractAWSConfig=global_aws_config()) return cloudwatch_logs( @@ -1508,9 +1545,9 @@ policies are cumulative. Any sensitive term specified in either policy is masked # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"scope"`: Currently the only valid value for this parameter is GLOBAL, which specifies - that the data protection policy applies to all log groups in the account. If you omit this - parameter, the default of GLOBAL is used. +- `"scope"`: Currently the only valid value for this parameter is ALL, which specifies that + the data protection policy applies to all log groups in the account. If you omit this + parameter, the default of ALL is used. """ function put_account_policy( policyDocument, @@ -2073,14 +2110,15 @@ through PutLogEvents and have them delivered to a specific destination. When log sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. - A logical destination that belongs to a different account, for cross-account delivery. -An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the -subscription filter, for same-account delivery. An Lambda function that belongs to the -same account as the subscription filter, for same-account delivery. Each log group can -have up to two subscription filters associated with it. If you are updating an existing -filter, you must specify the correct name in filterName. To perform a -PutSubscriptionFilter operation for any destination except a Lambda function, you must also -have the iam:PassRole permission. 
+ A logical destination created with PutDestination that belongs to a different account, +for cross-account delivery. We currently support Kinesis Data Streams and Kinesis Data +Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that +belongs to the same account as the subscription filter, for same-account delivery. An +Lambda function that belongs to the same account as the subscription filter, for +same-account delivery. Each log group can have up to two subscription filters associated +with it. If you are updating an existing filter, you must specify the correct name in +filterName. To perform a PutSubscriptionFilter operation for any destination except a +Lambda function, you must also have the iam:PassRole permission. # Arguments - `destination_arn`: The ARN of the destination to deliver matching log events to. @@ -2161,14 +2199,19 @@ end Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use. For more information, see CloudWatch -Logs Insights Query Syntax. Queries time out after 60 minutes of runtime. If your queries -are timing out, reduce the time range being searched or partition your query into a number -of queries. If you are using CloudWatch cross-account observability, you can use this -operation in a monitoring account to start a query in a linked source account. For more -information, see CloudWatch cross-account observability. For a cross-account StartQuery -operation, the query definition must be defined in the monitoring account. You can have up -to 30 concurrent CloudWatch Logs insights queries, including queries that have been added -to dashboards. +Logs Insights Query Syntax. After you run a query using StartQuery, the query results are +stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, +using the queryId that StartQuery returns. If you have associated a KMS key with the query +results in this account, then StartQuery uses that key to encrypt the results when it +stores them. If no key is associated with query results, the query results are encrypted +with the default CloudWatch Logs encryption method. Queries time out after 60 minutes of +runtime. If your queries are timing out, reduce the time range being searched or partition +your query into a number of queries. If you are using CloudWatch cross-account +observability, you can use this operation in a monitoring account to start a query in a +linked source account. For more information, see CloudWatch cross-account observability. +For a cross-account StartQuery operation, the query definition must be defined in the +monitoring account. You can have up to 30 concurrent CloudWatch Logs insights queries, +including queries that have been added to dashboards. # Arguments - `end_time`: The end of the time range to query. The range is inclusive, so the specified @@ -2190,14 +2233,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys querying is in a source account and you're using a monitoring account, you must specify the ARN of the log group here. The query definition must also be defined in the monitoring account. If you specify an ARN, the ARN can't end with an asterisk (*). A StartQuery - operation must include exactly one of the following parameters: logGroupName, logGroupNames - or logGroupIdentifiers. + operation must include exactly one of the following parameters: logGroupName, + logGroupNames, or logGroupIdentifiers. 
- `"logGroupName"`: The log group on which to perform the query. A StartQuery operation - must include exactly one of the following parameters: logGroupName, logGroupNames or + must include exactly one of the following parameters: logGroupName, logGroupNames, or logGroupIdentifiers. - `"logGroupNames"`: The list of log groups to be queried. You can include up to 50 log groups. A StartQuery operation must include exactly one of the following parameters: - logGroupName, logGroupNames or logGroupIdentifiers. + logGroupName, logGroupNames, or logGroupIdentifiers. """ function start_query( endTime, queryString, startTime; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/codeartifact.jl b/src/services/codeartifact.jl index 29e9cef78e..29750418b1 100644 --- a/src/services/codeartifact.jl +++ b/src/services/codeartifact.jl @@ -1810,7 +1810,7 @@ CodeArtifact User Guide. - `domain`: The name of the domain that contains the repository that contains the package version to publish. - `format`: A format that specifies the type of the package version with the requested - asset file. + asset file. The only supported value is generic. - `package`: The name of the package version to publish. - `repository`: The name of the repository that the package version will be published to. - `version`: The package version to publish (for example, 3.5.2). diff --git a/src/services/codecatalyst.jl b/src/services/codecatalyst.jl index c0341f9cc2..73ff952e7a 100644 --- a/src/services/codecatalyst.jl +++ b/src/services/codecatalyst.jl @@ -167,6 +167,49 @@ function create_project( ) end +""" + create_source_repository(name, project_name, space_name) + create_source_repository(name, project_name, space_name, params::Dict{String,<:Any}) + +Creates an empty Git-based source repository in a specified project. The repository is +created with an initial empty commit with a default branch named main. + +# Arguments +- `name`: The name of the source repository. For more information about name requirements, + see Quotas for source repositories. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the source repository. +""" +function create_source_repository( + name, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "PUT", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_source_repository( + name, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "PUT", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_source_repository_branch(name, project_name, source_repository_name, space_name) create_source_repository_branch(name, project_name, source_repository_name, space_name, params::Dict{String,<:Any}) @@ -288,6 +331,111 @@ function delete_dev_environment( ) end +""" + delete_project(name, space_name) + delete_project(name, space_name, params::Dict{String,<:Any}) + +Deletes a project in a space. + +# Arguments +- `name`: The name of the project in the space. To retrieve a list of project names, use + ListProjects. 
+- `space_name`: The name of the space. + +""" +function delete_project(name, spaceName; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_project( + name, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_source_repository(name, project_name, space_name) + delete_source_repository(name, project_name, space_name, params::Dict{String,<:Any}) + +Deletes a source repository in Amazon CodeCatalyst. You cannot use this API to delete a +linked repository. It can only be used to delete a Amazon CodeCatalyst source repository. + +# Arguments +- `name`: The name of the source repository. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. + +""" +function delete_source_repository( + name, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_source_repository( + name, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_space(name) + delete_space(name, params::Dict{String,<:Any}) + +Deletes a space. Deleting a space cannot be undone. Additionally, since space names must +be unique across Amazon CodeCatalyst, you cannot reuse names of deleted spaces. + +# Arguments +- `name`: The name of the space. To retrieve a list of space names, use ListSpaces. + +""" +function delete_space(name; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "DELETE", + "/v1/spaces/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_space( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "DELETE", + "/v1/spaces/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_dev_environment(id, project_name, space_name) get_dev_environment(id, project_name, space_name, params::Dict{String,<:Any}) @@ -362,6 +510,44 @@ function get_project( ) end +""" + get_source_repository(name, project_name, space_name) + get_source_repository(name, project_name, space_name, params::Dict{String,<:Any}) + +Returns information about a source repository. + +# Arguments +- `name`: The name of the source repository. +- `project_name`: The name of the project in the space. +- `space_name`: The name of the space. 
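For illustration only, a sketch of the CodeCatalyst source repository lifecycle calls added in this file, using assumed space, project, and repository names with the high-level `@service` interface:

```julia
using AWS: @service
@service CodeCatalyst   # assumed module name; resolves to src/services/codecatalyst.jl

space, project, repo = "my-space", "my-project", "my-repo"   # placeholder names

# Create an empty Git repository (initial commit on a default "main" branch).
CodeCatalyst.create_source_repository(
    repo, project, space, Dict("description" => "Scratch repository created from AWS.jl")
)

# Read it back, then remove it again.
info = CodeCatalyst.get_source_repository(repo, project, space)
CodeCatalyst.delete_source_repository(repo, project, space)

# The project and space can also be removed with the matching delete calls defined above.
CodeCatalyst.delete_project(project, space)
CodeCatalyst.delete_space(space)
```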
+ +""" +function get_source_repository( + name, projectName, spaceName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_source_repository( + name, + projectName, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "GET", + "/v1/spaces/$(spaceName)/projects/$(projectName)/sourceRepositories/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_source_repository_clone_urls(project_name, source_repository_name, space_name) get_source_repository_clone_urls(project_name, source_repository_name, space_name, params::Dict{String,<:Any}) @@ -1062,6 +1248,76 @@ function update_dev_environment( ) end +""" + update_project(name, space_name) + update_project(name, space_name, params::Dict{String,<:Any}) + +Changes one or more values for a project. + +# Arguments +- `name`: The name of the project. +- `space_name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the project. +""" +function update_project(name, spaceName; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "PATCH", + "/v1/spaces/$(spaceName)/projects/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_project( + name, + spaceName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codecatalyst( + "PATCH", + "/v1/spaces/$(spaceName)/projects/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_space(name) + update_space(name, params::Dict{String,<:Any}) + +Changes one or more values for a space. + +# Arguments +- `name`: The name of the space. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the space. +""" +function update_space(name; aws_config::AbstractAWSConfig=global_aws_config()) + return codecatalyst( + "PATCH", + "/v1/spaces/$(name)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_space( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codecatalyst( + "PATCH", + "/v1/spaces/$(name)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ verify_session() verify_session(params::Dict{String,<:Any}) diff --git a/src/services/codeguru_security.jl b/src/services/codeguru_security.jl index b90dacb3b9..9c85947d47 100644 --- a/src/services/codeguru_security.jl +++ b/src/services/codeguru_security.jl @@ -59,7 +59,7 @@ Use to create a scan using code uploaded to an S3 bucket. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"analysisType"`: The type of analysis you want CodeGuru Security to perform in the scan, - either Security or All. The Secuirty type only generates findings related to security. The + either Security or All. The Security type only generates findings related to security. The All type generates both security findings and quality findings. Defaults to Security type if missing. - `"clientToken"`: The idempotency token for the request. 
Amazon CodeGuru Security uses @@ -349,7 +349,7 @@ end list_scans() list_scans(params::Dict{String,<:Any}) -Returns a list of all the scans in an account. +Returns a list of all the standard scans in an account. Does not return express scans. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/comprehendmedical.jl b/src/services/comprehendmedical.jl index e914442548..0ee800160b 100644 --- a/src/services/comprehendmedical.jl +++ b/src/services/comprehendmedical.jl @@ -12,7 +12,7 @@ Gets the properties associated with a medical entities detection job. Use this o get the status of a detection job. # Arguments -- `job_id`: The identifier that Comprehend Medical; generated for the job. The +- `job_id`: The identifier that Amazon Comprehend Medical generated for the job. The StartEntitiesDetectionV2Job operation returns this identifier in its response. """ @@ -78,7 +78,7 @@ Gets the properties associated with a protected health information (PHI) detecti this operation to get the status of a detection job. # Arguments -- `job_id`: The identifier that Comprehend Medical; generated for the job. The +- `job_id`: The identifier that Amazon Comprehend Medical generated for the job. The StartPHIDetectionJob operation returns this identifier in its response. """ @@ -172,13 +172,12 @@ end detect_entities(text, params::Dict{String,<:Any}) The DetectEntities operation is deprecated. You should use the DetectEntitiesV2 operation -instead. Inspects the clinical text for a variety of medical entities and returns specific +instead. Inspects the clinical text for a variety of medical entities and returns specific information about them such as entity category, location, and confidence score on that -information . +information. # Arguments - `text`: A UTF-8 text string containing the clinical content being examined for entities. - Each string must contain fewer than 20,000 bytes of characters. """ function detect_entities(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -214,8 +213,7 @@ operation in all new applications. The DetectEntitiesV2 operation returns the Ac Direction entities as attributes instead of types. # Arguments -- `text`: A UTF-8 string containing the clinical content being examined for entities. Each - string must contain fewer than 20,000 bytes of characters. +- `text`: A UTF-8 string containing the clinical content being examined for entities. """ function detect_entities_v2(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -241,13 +239,13 @@ end detect_phi(text) detect_phi(text, params::Dict{String,<:Any}) - Inspects the clinical text for protected health information (PHI) entities and returns the +Inspects the clinical text for protected health information (PHI) entities and returns the entity category, location, and confidence score for each entity. Amazon Comprehend Medical only detects entities in English language texts. # Arguments -- `text`: A UTF-8 text string containing the clinical content being examined for PHI - entities. Each string must contain fewer than 20,000 bytes of characters. +- `text`: A UTF-8 text string containing the clinical content being examined for PHI + entities. """ function detect_phi(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -279,8 +277,7 @@ Centers for Disease Control. Amazon Comprehend Medical only detects medical enti English language texts. # Arguments -- `text`: The input text used for analysis. 
The input for InferICD10CM is a string from 1 - to 10000 characters. +- `text`: The input text used for analysis. """ function infer_icd10_cm(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -312,8 +309,7 @@ Medicine. Amazon Comprehend Medical only detects medical entities in English lan texts. # Arguments -- `text`: The input text used for analysis. The input for InferRxNorm is a string from 1 to - 10000 characters. +- `text`: The input text used for analysis. """ function infer_rx_norm(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -343,8 +339,7 @@ end the Systematized Nomenclature of Medicine, Clinical Terms (SNOMED-CT) ontology # Arguments -- `text`: The input text to be analyzed using InferSNOMEDCT. The text should be a string - with 1 to 10000 characters. +- `text`: The input text to be analyzed using InferSNOMEDCT. """ function infer_snomedct(Text; aws_config::AbstractAWSConfig=global_aws_config()) @@ -434,7 +429,7 @@ end list_phidetection_jobs() list_phidetection_jobs(params::Dict{String,<:Any}) -Gets a list of protected health information (PHI) detection jobs that you have submitted. +Gets a list of protected health information (PHI) detection jobs you have submitted. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -528,19 +523,19 @@ DescribeEntitiesDetectionV2Job operation to track the status of a job. # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: The input configuration that specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. All documents must be in the same - language. Comprehend Medical; processes files in US English (en). + language. Amazon Comprehend Medical processes files in US English (en). - `output_data_config`: The output configuration that specifies where to send the output files. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one for you. + request token, Amazon Comprehend Medical generates one for you. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. @@ -602,8 +597,8 @@ ontology. Use the DescribeICD10CMInferenceJob operation to track the status of a # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: Specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. 
All documents must be in the same language. @@ -612,7 +607,7 @@ ontology. Use the DescribeICD10CMInferenceJob operation to track the status of a # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one. + request token, Amazon Comprehend Medical generates one. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. @@ -674,8 +669,8 @@ DescribePHIDetectionJob operation to track the status of a job. # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: Specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. All documents must be in the same language. @@ -684,7 +679,7 @@ DescribePHIDetectionJob operation to track the status of a job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one. + request token, Amazon Comprehend Medical generates one. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. @@ -746,8 +741,8 @@ ontology. Use the DescribeRxNormInferenceJob operation to track the status of a # Arguments - `data_access_role_arn`: The Amazon Resource Name (ARN) of the AWS Identity and Access - Management (IAM) role that grants Comprehend Medical; read access to your input data. For - more information, see Role-Based Permissions Required for Asynchronous Operations. + Management (IAM) role that grants Amazon Comprehend Medical read access to your input data. + For more information, see Role-Based Permissions Required for Asynchronous Operations. - `input_data_config`: Specifies the format and location of the input data for the job. - `language_code`: The language of the input documents. All documents must be in the same language. @@ -756,7 +751,7 @@ ontology. Use the DescribeRxNormInferenceJob operation to track the status of a # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: A unique identifier for the request. If you don't set the client - request token, Comprehend Medical; generates one. + request token, Amazon Comprehend Medical generates one. - `"JobName"`: The identifier of the job. - `"KMSKey"`: An AWS Key Management Service key to encrypt your output files. If you do not specify a key, the files are written in plain text. diff --git a/src/services/connect.jl b/src/services/connect.jl index 1af51b9c73..50c72f6ab9 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -511,7 +511,18 @@ Connect instance or traffic distribution group was created. 
For more information to use this operation, see Claim a phone number in your country and Claim phone numbers to traffic distribution groups in the Amazon Connect Administrator Guide. You can call the SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call the -DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation. +DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation. If +you plan to claim and release numbers frequently during a 30 day period, contact us for a +service quota exception. Otherwise, it is possible you will be blocked from claiming and +releasing any more numbers until 30 days past the oldest number released has expired. By +default you can claim and release up to 200% of your maximum number of active phone numbers +during any 30 day period. If you claim and release phone numbers using the UI or API during +a rolling 30 day cycle that exceeds 200% of your phone number service level quota, you will +be blocked from claiming any more numbers until 30 days past the oldest number released has +expired. For example, if you already have 99 claimed numbers and a service level quota of +99 phone numbers, and in any 30 day period you release 99, claim 99, and then release 99, +you will have exceeded the 200% limit. At that point you are blocked from claiming any more +numbers until you open an Amazon Web Services support ticket. # Arguments - `phone_number`: The phone number you want to claim. Phone numbers are formatted [+] @@ -1109,7 +1120,9 @@ the OutboundCallerConfig request body parameter. However, if the number is claim traffic distribution group and you are calling this API using an instance in the alternate Amazon Web Services Region associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a -ResourceNotFoundException. +ResourceNotFoundException. Only use the phone number ARN format that doesn't contain +instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. +This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API. # Arguments - `hours_of_operation_id`: The identifier for the hours of operation. @@ -2162,6 +2175,43 @@ function delete_prompt( ) end +""" + delete_queue(instance_id, queue_id) + delete_queue(instance_id, queue_id, params::Dict{String,<:Any}) + +Deletes a queue. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `queue_id`: The identifier for the queue. 
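For illustration only, a sketch of the new Connect deletion operations (DeleteQueue here, and DeleteRoutingProfile defined just below), using a placeholder instance ID and resource IDs:

```julia
using AWS: @service
@service Connect   # assumed module name; resolves to src/services/connect.jl

instance_id = "12345678-1234-1234-1234-123456789012"   # placeholder instance ID

# New deletion calls added in this change.
Connect.delete_queue(instance_id, "queue-id-to-remove")
Connect.delete_routing_profile(instance_id, "routing-profile-id-to-remove")
```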
+ +""" +function delete_queue( + InstanceId, QueueId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/queues/$(InstanceId)/$(QueueId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_queue( + InstanceId, + QueueId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/queues/$(InstanceId)/$(QueueId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_quick_connect(instance_id, quick_connect_id) delete_quick_connect(instance_id, quick_connect_id, params::Dict{String,<:Any}) @@ -2199,6 +2249,43 @@ function delete_quick_connect( ) end +""" + delete_routing_profile(instance_id, routing_profile_id) + delete_routing_profile(instance_id, routing_profile_id, params::Dict{String,<:Any}) + +Deletes a routing profile. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. +- `routing_profile_id`: The identifier of the routing profile. + +""" +function delete_routing_profile( + InstanceId, RoutingProfileId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "DELETE", + "/routing-profiles/$(InstanceId)/$(RoutingProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_routing_profile( + InstanceId, + RoutingProfileId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "DELETE", + "/routing-profiles/$(InstanceId)/$(RoutingProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_rule(instance_id, rule_id) delete_rule(instance_id, rule_id, params::Dict{String,<:Any}) @@ -4114,17 +4201,20 @@ definitions in the Amazon Connect Administrator's Guide. start time timestamp. It cannot be later than the current timestamp. The time range between the start and end time must be less than 24 hours. - `filters`: The filters to apply to returned metrics. You can filter on the following - resources: Queues Routing profiles Agents Channels User hierarchy groups At - least one filter must be passed from queues, routing profiles, agents, or user hierarchy - groups. To filter by phone number, see Create a historical metrics report in the Amazon - Connect Administrator's Guide. Note the following limits: Filter keys: A maximum of 5 - filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | - AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | - AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE - Filter values: A maximum of 100 filter values are supported in a single request. For - example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing - profiles for a total of 100 filter values. VOICE, CHAT, and TASK are valid filterValue for - the CHANNEL filter key. + resources: Queues Routing profiles Agents Channels User hierarchy groups + Feature At least one filter must be passed from queues, routing profiles, agents, or user + hierarchy groups. To filter by phone number, see Create a historical metrics report in the + Amazon Connect Administrator's Guide. Note the following limits: Filter keys: A maximum + of 5 filter keys are supported in a single request. 
Valid filter keys: QUEUE | + ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | + AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | + FEATURE Filter values: A maximum of 100 filter values are supported in a single + request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do + not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request + can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter + values, along with 3 channel filters. contact_lens_conversational_analytics is a valid + filterValue for the FEATURE filter key. It is available only to contacts analyzed by + Contact Lens conversational analytics. - `metrics`: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide. @@ -4141,30 +4231,62 @@ definitions in the Amazon Connect Administrator's Guide. Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For - now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | - CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy AVG_HOLD_TIME Unit: Seconds Valid groupings and - filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy - AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy AVG_INTERACTION_TIME Unit: Seconds Valid - groupings and filters: Queue, Channel, Routing Profile AVG_QUEUE_ANSWER_TIME Unit: - Seconds Valid groupings and filters: Queue, Channel, Routing Profile CONTACTS_ABANDONED - Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid - groupings and filters: Queue, Channel, Routing Profile CONTACTS_HANDLED Unit: Count Valid - metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_HOLD_ABANDONS Unit: - Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy + and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature Feature is a + valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid + metric filter key: INITIATION_METHOD. For now, this metric only supports the following as + INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy AVG_AGENT_CONNECTING_TIME Unit: Seconds + Valid metric filter key: INITIATION_METHOD. 
For now, this metric only supports the + following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy AVG_CONTACT_DURATION + Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, Feature Feature is a valid filter but not a valid grouping. + AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy AVG_GREETING_TIME_AGENT This metric is available + only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy + AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, Feature Feature is a valid filter but not a valid + grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy, Feature Feature is a valid filter but not a valid + grouping. AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, Feature Feature is a valid filter but not a valid + grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy AVG_INTERACTION_TIME Unit: + Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature Feature is a + valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available + only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy + AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy AVG_NON_TALK_TIME This metric is available only + for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy + AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing + Profile, Feature Feature is a valid filter but not a valid grouping. AVG_TALK_TIME This + metric is available only for contacts analyzed by Contact Lens conversational analytics. + Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by + Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy AVG_TALK_TIME_CUSTOMER This metric is + available only for contacts analyzed by Contact Lens conversational analytics. Unit: + Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy CONTACTS_ABANDONED Unit: Count Valid groupings and filters: Queue, Channel, + Routing Profile, Agent, Agent Hierarchy CONTACTS_CREATED Unit: Count Valid metric filter + key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, + Feature Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED Unit: + Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature Feature is a + valid filter but not a valid grouping. CONTACTS_HOLD_ABANDONS Unit: Count Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy CONTACTS_TRANSFERRED_OUT_BY_AGENT - Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy MAX_QUEUED_TIME Unit: Seconds - Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature Feature is a valid filter + but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings + and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy + CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy MAX_QUEUED_TIME Unit: Seconds Valid + groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must @@ -5937,7 +6059,18 @@ number was claimed. To release phone numbers from a traffic distribution group, ReleasePhoneNumber API, not the Amazon Connect console. After releasing a phone number, the phone number enters into a cooldown period of 30 days. It cannot be searched for or claimed again until the period has ended. If you accidentally release a phone number, contact -Amazon Web Services Support. +Amazon Web Services Support. If you plan to claim and release numbers frequently during a +30 day period, contact us for a service quota exception. Otherwise, it is possible you will +be blocked from claiming and releasing any more numbers until 30 days past the oldest +number released has expired. By default you can claim and release up to 200% of your +maximum number of active phone numbers during any 30 day period. If you claim and release +phone numbers using the UI or API during a rolling 30 day cycle that exceeds 200% of your +phone number service level quota, you will be blocked from claiming any more numbers until +30 days past the oldest number released has expired. For example, if you already have 99 +claimed numbers and a service level quota of 99 phone numbers, and in any 30 day period you +release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that +point you are blocked from claiming any more numbers until you open an Amazon Web Services +support ticket. # Arguments - `phone_number_id`: A unique identifier for the phone number. @@ -6046,8 +6179,8 @@ end resume_contact_recording(contact_id, initial_contact_id, instance_id, params::Dict{String,<:Any}) When a contact is being recorded, and the recording has been suspended using -SuspendContactRecording, this API resumes recording the call. 
Only voice recordings are -supported at this time. +SuspendContactRecording, this API resumes recording the call or screen. Voice and screen +recordings are supported. # Arguments - `contact_id`: The identifier of the contact. @@ -6260,8 +6393,7 @@ end search_queues(instance_id) search_queues(instance_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Searches queues -in an Amazon Connect instance, with optional filtering. +Searches queues in an Amazon Connect instance, with optional filtering. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6348,12 +6480,56 @@ function search_quick_connects( ) end +""" + search_resource_tags(instance_id) + search_resource_tags(instance_id, params::Dict{String,<:Any}) + +Searches tags used in an Amazon Connect instance using optional search criteria. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"ResourceTypes"`: The list of resource types to be used to search tags from. If not + provided or if any empty list is provided, this API will search from all supported resource + types. +- `"SearchCriteria"`: The search criteria to be used to return tags. +""" +function search_resource_tags(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) + return connect( + "POST", + "/search-resource-tags", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_resource_tags( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-resource-tags", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_routing_profiles(instance_id) search_routing_profiles(instance_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Searches -routing profiles in an Amazon Connect instance, with optional filtering. +Searches routing profiles in an Amazon Connect instance, with optional filtering. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6401,8 +6577,7 @@ end search_security_profiles(instance_id) search_security_profiles(instance_id, params::Dict{String,<:Any}) -This API is in preview release for Amazon Connect and is subject to change. Searches -security profiles in an Amazon Connect instance, with optional filtering. +Searches security profiles in an Amazon Connect instance, with optional filtering. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -7222,11 +7397,11 @@ end suspend_contact_recording(contact_id, initial_contact_id, instance_id) suspend_contact_recording(contact_id, initial_contact_id, instance_id, params::Dict{String,<:Any}) -When a contact is being recorded, this API suspends recording the call. 
For example, you -might suspend the call recording while collecting sensitive information, such as a credit -card number. Then use ResumeContactRecording to restart recording. The period of time that -the recording is suspended is filled with silence in the final recording. Only voice -recordings are supported at this time. +When a contact is being recorded, this API suspends recording the call or screen. For +example, you might suspend the call or screen recording while collecting sensitive +information, such as a credit card number. Then use ResumeContactRecording to restart +recording. The period of time that the recording is suspended is filled with silence in the +final recording. Voice and screen recordings are supported. # Arguments - `contact_id`: The identifier of the contact. @@ -8418,7 +8593,10 @@ for the OutboundCallerIdNumberId value of the OutboundCallerConfig request body However, if the number is claimed to a traffic distribution group and you are calling this API using an instance in the alternate Amazon Web Services Region associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided -in this scenario, you will receive a ResourceNotFoundException. +in this scenario, you will receive a ResourceNotFoundException. Only use the phone number +ARN format that doesn't contain instance in the path, for example, +arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is +returned when you call the ListPhoneNumbersV2 API. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance diff --git a/src/services/connectcases.jl b/src/services/connectcases.jl index 72ed9e2d6d..f38a78d44a 100644 --- a/src/services/connectcases.jl +++ b/src/services/connectcases.jl @@ -151,7 +151,7 @@ Creates a domain, which is a container for all case data, such as cases, fields, and layouts. Each Amazon Connect instance can be associated with only one Cases domain. This will not associate your connect instance to Cases domain. Instead, use the Amazon Connect CreateIntegrationAssociation API. You need specific IAM permissions to successfully -associate the Cases domain. For more information, see Onboard to Cases. +associate the Cases domain. For more information, see Onboard to Cases. </important> # Arguments - `name`: The name for your Cases domain. It must be unique for your Amazon Web Services @@ -371,7 +371,11 @@ end delete_domain(domain_id) delete_domain(domain_id, params::Dict{String,<:Any}) -Deletes a domain. +Deletes a Cases domain. <note> <p>After deleting your domain you must +disassociate the deleted domain from your Amazon Connect instance with another API call +before being able to use Cases again with this Amazon Connect instance. See <a +href="https://docs.aws.amazon.com/connect/latest/APIReference/API_DeleteIntegrationAsso +ciation.html">DeleteIntegrationAssociation</a>.</p> </note> # Arguments - `domain_id`: The unique identifier of the Cases domain. diff --git a/src/services/cost_explorer.jl b/src/services/cost_explorer.jl index 1d3ba7ccfe..cb510c267e 100644 --- a/src/services/cost_explorer.jl +++ b/src/services/cost_explorer.jl @@ -1111,6 +1111,47 @@ function get_rightsizing_recommendation( ) end +""" + get_savings_plan_purchase_recommendation_details(recommendation_detail_id) + get_savings_plan_purchase_recommendation_details(recommendation_detail_id, params::Dict{String,<:Any}) + +Retrieves the details for a Savings Plan recommendation. 
These details include the hourly +data-points that construct the new cost, coverage, and utilization charts. + +# Arguments +- `recommendation_detail_id`: The ID that is associated with the Savings Plan + recommendation. + +""" +function get_savings_plan_purchase_recommendation_details( + RecommendationDetailId; aws_config::AbstractAWSConfig=global_aws_config() +) + return cost_explorer( + "GetSavingsPlanPurchaseRecommendationDetails", + Dict{String,Any}("RecommendationDetailId" => RecommendationDetailId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_savings_plan_purchase_recommendation_details( + RecommendationDetailId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cost_explorer( + "GetSavingsPlanPurchaseRecommendationDetails", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("RecommendationDetailId" => RecommendationDetailId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_savings_plans_coverage(time_period) get_savings_plans_coverage(time_period, params::Dict{String,<:Any}) @@ -1869,7 +1910,9 @@ end update_anomaly_subscription(subscription_arn) update_anomaly_subscription(subscription_arn, params::Dict{String,<:Any}) -Updates an existing cost anomaly monitor subscription. +Updates an existing cost anomaly subscription. Specify the fields that you want to update. +Omitted fields are unchanged. The JSON below describes the generic construct for each +type. See Request Parameters for possible values as they apply to AnomalySubscription. # Arguments - `subscription_arn`: A cost anomaly subscription Amazon Resource Name (ARN). @@ -1883,20 +1926,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Threshold"`: (deprecated) The update to the threshold value for receiving notifications. This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a - ThresholdExpression. + ThresholdExpression. You can specify either Threshold or ThresholdExpression, but not both. - `"ThresholdExpression"`: The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and - ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The - match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and - 10,000,000,000. The following are examples of valid ThresholdExpressions: Absolute - threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", - \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } Percentage - threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", - \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } AND two - thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": + ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and + TotalImpactPercentage, respectively (see Impact for more details). The supported nested + expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values + must be numbers between 0 and 10,000,000,000 in string format. You can specify either + Threshold or ThresholdExpression, but not both. 
The following are examples of valid + ThresholdExpressions: Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], - \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": + \"Values\": [ \"100\" ] } } Percentage threshold: { \"Dimensions\": { \"Key\": + \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], + \"Values\": [ \"100\" ] } } AND two thresholds together: { \"And\": [ { \"Dimensions\": + { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" + ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] } OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ diff --git a/src/services/customer_profiles.jl b/src/services/customer_profiles.jl index f01d0f21bd..771569d418 100644 --- a/src/services/customer_profiles.jl +++ b/src/services/customer_profiles.jl @@ -140,7 +140,7 @@ attributes, object types, profile keys, and encryption keys. You can create mult domains, and each domain can have multiple third-party integrations. Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain. Use this API or UpdateDomain to enable identity resolution: set -Matching to true. To prevent cross-service impersonation when you call this API, see +Matching to true. To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply. # Arguments @@ -164,6 +164,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3. +- `"RuleBasedMatching"`: The process of matching duplicate profiles using the Rule-Based + matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match + and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the + results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you + can download the results from S3. - `"Tags"`: The tags used to organize, track, or control access for this resource. """ function create_domain( @@ -1161,6 +1167,69 @@ function get_profile_object_type_template( ) end +""" + get_similar_profiles(domain_name, match_type, search_key, search_value) + get_similar_profiles(domain_name, match_type, search_key, search_value, params::Dict{String,<:Any}) + +Returns a set of profiles that belong to the same matching group using the matchId or +profileId. You can also specify the type of matching that you want for finding similar +profiles using either RULE_BASED_MATCHING or ML_BASED_MATCHING. + +# Arguments +- `domain_name`: The unique name of the domain. +- `match_type`: Specify the type of matching to get similar profiles for. +- `search_key`: The string indicating the search key to be used. +- `search_value`: The string based on SearchKey to be searched for similar profiles. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"max-results"`: The maximum number of objects returned per page. +- `"next-token"`: The pagination token from the previous GetSimilarProfiles API call. +""" +function get_similar_profiles( + DomainName, + MatchType, + SearchKey, + SearchValue; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return customer_profiles( + "POST", + "/domains/$(DomainName)/matches", + Dict{String,Any}( + "MatchType" => MatchType, "SearchKey" => SearchKey, "SearchValue" => SearchValue + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_similar_profiles( + DomainName, + MatchType, + SearchKey, + SearchValue, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return customer_profiles( + "POST", + "/domains/$(DomainName)/matches", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "MatchType" => MatchType, + "SearchKey" => SearchKey, + "SearchValue" => SearchValue, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_workflow(domain_name, workflow_id) get_workflow(domain_name, workflow_id, params::Dict{String,<:Any}) @@ -1610,6 +1679,44 @@ function list_profile_objects( ) end +""" + list_rule_based_matches(domain_name) + list_rule_based_matches(domain_name, params::Dict{String,<:Any}) + +Returns a set of MatchIds that belong to the given domain. + +# Arguments +- `domain_name`: The unique name of the domain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"max-results"`: The maximum number of MatchIds returned per page. +- `"next-token"`: The pagination token from the previous ListRuleBasedMatches API call. +""" +function list_rule_based_matches( + DomainName; aws_config::AbstractAWSConfig=global_aws_config() +) + return customer_profiles( + "GET", + "/domains/$(DomainName)/profiles/ruleBasedMatches"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_rule_based_matches( + DomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return customer_profiles( + "GET", + "/domains/$(DomainName)/profiles/ruleBasedMatches", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -2121,7 +2228,7 @@ end Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key. After a domain is created, the name can’t be changed. Use this API or -CreateDomain to enable identity resolution: set Matching to true. To prevent cross-service +CreateDomain to enable identity resolution: set Matching to true. To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply. To add or remove tags on an existing Domain, see TagResource/UntagResource. @@ -2149,6 +2256,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3. +- `"RuleBasedMatching"`: The process of matching duplicate profiles using the rule-Based + matching. 
If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match + and merge your profiles according to your configuration in the RuleBasedMatchingRequest. + You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the + results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you + can download the results from S3. - `"Tags"`: The tags used to organize, track, or control access for this resource. """ function update_domain(DomainName; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/database_migration_service.jl b/src/services/database_migration_service.jl index c1b7248ec3..0d20568bf1 100644 --- a/src/services/database_migration_service.jl +++ b/src/services/database_migration_service.jl @@ -299,6 +299,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide. - `"Tags"`: One or more tags to be assigned to the endpoint. +- `"TimestreamSettings"`: Settings in JSON format for the target Amazon Timestream endpoint. - `"Username"`: The user name to be used to log in to the endpoint database. """ function create_endpoint( @@ -477,6 +478,101 @@ function create_fleet_advisor_collector( ) end +""" + create_replication_config(compute_config, replication_config_identifier, replication_type, source_endpoint_arn, table_mappings, target_endpoint_arn) + create_replication_config(compute_config, replication_config_identifier, replication_type, source_endpoint_arn, table_mappings, target_endpoint_arn, params::Dict{String,<:Any}) + +Creates a configuration that you can later provide to configure and start an DMS Serverless +replication. You can also provide options to validate the configuration inputs before you +start the replication. + +# Arguments +- `compute_config`: Configuration parameters for provisioning an DMS Serverless replication. +- `replication_config_identifier`: A unique identifier that you want to use to create a + ReplicationConfigArn that is returned as part of the output from this action. You can then + pass this output ReplicationConfigArn as the value of the ReplicationConfigArn option for + other actions to identify both DMS Serverless replications and replication configurations + that you want those actions to operate on. For some actions, you can also use either this + unique identifier or a corresponding ARN in action filters to identify the specific + replication and replication configuration to operate on. +- `replication_type`: The type of DMS Serverless replication to provision using this + replication configuration. Possible values: \"full-load\" \"cdc\" + \"full-load-and-cdc\" +- `source_endpoint_arn`: The Amazon Resource Name (ARN) of the source endpoint for this DMS + Serverless replication configuration. +- `table_mappings`: JSON table mappings for DMS Serverless replications that are + provisioned using this replication configuration. For more information, see Specifying + table selection and transformations rules using JSON. +- `target_endpoint_arn`: The Amazon Resource Name (ARN) of the target endpoint for this DMS + serverless replication configuration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ReplicationSettings"`: Optional JSON settings for DMS Serverless replications that are + provisioned using this replication configuration. For example, see Change processing + tuning settings. +- `"ResourceIdentifier"`: Optional unique value or name that you set for a given resource + that can be used to construct an Amazon Resource Name (ARN) for that resource. For more + information, see Fine-grained access control using resource names and tags. +- `"SupplementalSettings"`: Optional JSON settings for specifying supplemental data. For + more information, see Specifying supplemental data for task settings. +- `"Tags"`: One or more optional tags associated with resources used by the DMS Serverless + replication. For more information, see Tagging resources in Database Migration Service. +""" +function create_replication_config( + ComputeConfig, + ReplicationConfigIdentifier, + ReplicationType, + SourceEndpointArn, + TableMappings, + TargetEndpointArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "CreateReplicationConfig", + Dict{String,Any}( + "ComputeConfig" => ComputeConfig, + "ReplicationConfigIdentifier" => ReplicationConfigIdentifier, + "ReplicationType" => ReplicationType, + "SourceEndpointArn" => SourceEndpointArn, + "TableMappings" => TableMappings, + "TargetEndpointArn" => TargetEndpointArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_replication_config( + ComputeConfig, + ReplicationConfigIdentifier, + ReplicationType, + SourceEndpointArn, + TableMappings, + TargetEndpointArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "CreateReplicationConfig", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ComputeConfig" => ComputeConfig, + "ReplicationConfigIdentifier" => ReplicationConfigIdentifier, + "ReplicationType" => ReplicationType, + "SourceEndpointArn" => SourceEndpointArn, + "TableMappings" => TableMappings, + "TargetEndpointArn" => TargetEndpointArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_replication_instance(replication_instance_class, replication_instance_identifier) create_replication_instance(replication_instance_class, replication_instance_identifier, params::Dict{String,<:Any}) @@ -492,7 +588,8 @@ Use DMS. as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\". For more information on the settings and capacities for the available replication instance classes, see - Selecting the right DMS replication instance for your migration. + Choosing the right DMS replication instance; and, Selecting the best size for a replication + instance. - `replication_instance_identifier`: The replication instance identifier. This parameter is stored as a lowercase string. Constraints: Must contain 1-63 alphanumeric characters or hyphens. First character must be a letter. Can't end with a hyphen or contain two @@ -512,7 +609,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter. - `"AvailabilityZone"`: The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's - Amazon Web Services Region, for example: us-east-1d + Amazon Web Services Region, for example: us-east-1d. 
- `"DnsNameServers"`: A list of custom DNS name servers supported for the replication instance to access your on-premise source or target database. This list overrides the default name servers supported by the replication instance. You can specify a @@ -599,7 +696,11 @@ end Creates a replication subnet group given a list of the subnet IDs in a VPC. The VPC needs to have at least one subnet in at least two availability zones in the Amazon Web Services Region, otherwise the service will throw a ReplicationSubnetGroupDoesNotCoverEnoughAZs -exception. +exception. If a replication subnet group exists in your Amazon Web Services account, the +CreateReplicationSubnetGroup action returns the following error message: The Replication +Subnet Group already exists. In this case, delete the existing replication subnet group. To +do so, use the DeleteReplicationSubnetGroup action. Optionally, choose Subnet groups in the +DMS console, then choose your subnet group. Next, choose Delete from Actions. # Arguments - `replication_subnet_group_description`: The description for the subnet group. @@ -996,6 +1097,48 @@ function delete_fleet_advisor_databases( ) end +""" + delete_replication_config(replication_config_arn) + delete_replication_config(replication_config_arn, params::Dict{String,<:Any}) + +Deletes an DMS Serverless replication configuration. This effectively deprovisions any and +all replications that use this configuration. You can't delete the configuration for an DMS +Serverless replication that is ongoing. You can delete the configuration when the +replication is in a non-RUNNING and non-STARTING state. + +# Arguments +- `replication_config_arn`: The replication config to delete. + +""" +function delete_replication_config( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DeleteReplicationConfig", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_replication_config( + ReplicationConfigArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DeleteReplicationConfig", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_replication_instance(replication_instance_arn) delete_replication_instance(replication_instance_arn, params::Dict{String,<:Any}) @@ -1328,7 +1471,7 @@ Returns information about the possible endpoint settings available when you crea endpoint for a specific database engine. # Arguments -- `engine_name`: The databse engine used for your source or target endpoint. +- `engine_name`: The database engine used for your source or target endpoint. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1919,6 +2062,39 @@ function describe_refresh_schemas_status( ) end +""" + describe_replication_configs() + describe_replication_configs(params::Dict{String,<:Any}) + +Returns one or more existing DMS Serverless replication configurations as a list of +structures. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the replication configs. +- `"Marker"`: An optional pagination token provided by a previous request. 
If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_replication_configs(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeReplicationConfigs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_replication_configs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationConfigs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_replication_instance_task_logs(replication_instance_arn) describe_replication_instance_task_logs(replication_instance_arn, params::Dict{String,<:Any}) @@ -2042,6 +2218,55 @@ function describe_replication_subnet_groups( ) end +""" + describe_replication_table_statistics(replication_config_arn) + describe_replication_table_statistics(replication_config_arn, params::Dict{String,<:Any}) + +Returns table and schema statistics for one or more provisioned replications that use a +given DMS Serverless replication configuration. + +# Arguments +- `replication_config_arn`: The replication config to describe. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the replication table statistics. +- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_replication_table_statistics( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplicationTableStatistics", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_replication_table_statistics( + ReplicationConfigArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "DescribeReplicationTableStatistics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_replication_task_assessment_results() describe_replication_task_assessment_results(params::Dict{String,<:Any}) @@ -2204,6 +2429,39 @@ function describe_replication_tasks( ) end +""" + describe_replications() + describe_replications(params::Dict{String,<:Any}) + +Provides details on replication progress by returning status information for one or more +provisioned DMS Serverless replications. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters applied to the replications. 
+- `"Marker"`: An optional pagination token provided by a previous request. If this + parameter is specified, the response includes only records beyond the marker, up to the + value specified by MaxRecords. +- `"MaxRecords"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a pagination token called a marker is included + in the response so that the remaining results can be retrieved. +""" +function describe_replications(; aws_config::AbstractAWSConfig=global_aws_config()) + return database_migration_service( + "DescribeReplications"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_replications( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "DescribeReplications", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_schemas(endpoint_arn) describe_schemas(endpoint_arn, params::Dict{String,<:Any}) @@ -2492,6 +2750,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys For information about other available settings, see Extra connection attributes when using SAP ASE as a source for DMS and Extra connection attributes when using SAP ASE as a target for DMS in the Database Migration Service User Guide. +- `"TimestreamSettings"`: Settings in JSON format for the target Amazon Timestream endpoint. - `"Username"`: The user name to be used to login to the endpoint database. """ function modify_endpoint(EndpointArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2564,6 +2823,64 @@ function modify_event_subscription( ) end +""" + modify_replication_config(replication_config_arn) + modify_replication_config(replication_config_arn, params::Dict{String,<:Any}) + +Modifies an existing DMS Serverless replication configuration that you can use to start a +replication. This command includes input validation and logic to check the state of any +replication that uses this configuration. You can only modify a replication configuration +before any replication that uses it has started. As soon as you have initially started a +replication with a given configuiration, you can't modify that configuration, even if you +stop it. Other run statuses that allow you to run this command include FAILED and CREATED. +A provisioning state that allows you to run this command is FAILED_PROVISION. + +# Arguments +- `replication_config_arn`: The Amazon Resource Name of the replication to modify. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ComputeConfig"`: Configuration parameters for provisioning an DMS Serverless + replication. +- `"ReplicationConfigIdentifier"`: The new replication config to apply to the replication. +- `"ReplicationSettings"`: The settings for the replication. +- `"ReplicationType"`: The type of replication. +- `"SourceEndpointArn"`: The Amazon Resource Name (ARN) of the source endpoint for this DMS + serverless replication configuration. +- `"SupplementalSettings"`: Additional settings for the replication. +- `"TableMappings"`: Table mappings specified in the replication. +- `"TargetEndpointArn"`: The Amazon Resource Name (ARN) of the target endpoint for this DMS + serverless replication configuration. 
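+
+# Example
+A minimal usage sketch follows; the ARN is a placeholder rather than a real resource, and
+the new identifier is illustrative only:
+
+    modify_replication_config(
+        "arn:aws:dms:us-east-1:123456789012:replication-config:EXAMPLE",  # placeholder ARN
+        Dict("ReplicationConfigIdentifier" => "my-serverless-config"),
+    )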
+""" +function modify_replication_config( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ModifyReplicationConfig", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_replication_config( + ReplicationConfigArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ModifyReplicationConfig", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_replication_instance(replication_instance_arn) modify_replication_instance(replication_instance_arn, params::Dict{String,<:Any}) @@ -2928,6 +3245,61 @@ function refresh_schemas( ) end +""" + reload_replication_tables(replication_config_arn, tables_to_reload) + reload_replication_tables(replication_config_arn, tables_to_reload, params::Dict{String,<:Any}) + +Reloads the target database table with the source data for a given DMS Serverless +replication configuration. You can only use this operation with a task in the RUNNING +state, otherwise the service will throw an InvalidResourceStateFault exception. + +# Arguments +- `replication_config_arn`: The Amazon Resource Name of the replication config for which to + reload tables. +- `tables_to_reload`: The list of tables to reload. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ReloadOption"`: Options for reload. Specify data-reload to reload the data and + re-validate it if validation is enabled. Specify validate-only to re-validate the table. + This option applies only when validation is enabled for the replication. +""" +function reload_replication_tables( + ReplicationConfigArn, TablesToReload; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "ReloadReplicationTables", + Dict{String,Any}( + "ReplicationConfigArn" => ReplicationConfigArn, + "TablesToReload" => TablesToReload, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reload_replication_tables( + ReplicationConfigArn, + TablesToReload, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "ReloadReplicationTables", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ReplicationConfigArn" => ReplicationConfigArn, + "TablesToReload" => TablesToReload, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ reload_tables(replication_task_arn, tables_to_reload) reload_tables(replication_task_arn, tables_to_reload, params::Dict{String,<:Any}) @@ -3097,6 +3469,71 @@ function start_recommendations( ) end +""" + start_replication(replication_config_arn, start_replication_type) + start_replication(replication_config_arn, start_replication_type, params::Dict{String,<:Any}) + +For a given DMS Serverless replication configuration, DMS connects to the source endpoint +and collects the metadata to analyze the replication workload. Using this metadata, DMS +then computes and provisions the required capacity and starts replicating to the target +endpoint using the server resources that DMS has provisioned for the DMS Serverless +replication. 
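+
+A minimal sketch of starting a replication for an existing configuration follows; the ARN
+is a placeholder, and the type string follows the StartReplicationType values in the DMS
+API reference:
+
+    start_replication(
+        "arn:aws:dms:us-east-1:123456789012:replication-config:EXAMPLE",  # placeholder ARN
+        "start-replication",  # see StartReplicationType in the DMS API reference
+    )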
+ +# Arguments +- `replication_config_arn`: The Amazon Resource Name of the replication for which to start + replication. +- `start_replication_type`: The replication type. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CdcStartPosition"`: Indicates when you want a change data capture (CDC) operation to + start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation + to start. Specifying both values results in an error. The value can be in date, checkpoint, + or LSN/SCN format. +- `"CdcStartTime"`: Indicates the start time for a change data capture (CDC) operation. Use + either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. + Specifying both values results in an error. +- `"CdcStopPosition"`: Indicates when you want a change data capture (CDC) operation to + stop. The value can be either server time or commit time. +""" +function start_replication( + ReplicationConfigArn, + StartReplicationType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "StartReplication", + Dict{String,Any}( + "ReplicationConfigArn" => ReplicationConfigArn, + "StartReplicationType" => StartReplicationType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_replication( + ReplicationConfigArn, + StartReplicationType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "StartReplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ReplicationConfigArn" => ReplicationConfigArn, + "StartReplicationType" => StartReplicationType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_replication_task(replication_task_arn, start_replication_task_type) start_replication_task(replication_task_arn, start_replication_task_type, params::Dict{String,<:Any}) @@ -3314,6 +3751,46 @@ function start_replication_task_assessment_run( ) end +""" + stop_replication(replication_config_arn) + stop_replication(replication_config_arn, params::Dict{String,<:Any}) + +For a given DMS Serverless replication configuration, DMS stops any and all ongoing DMS +Serverless replications. This command doesn't deprovision the stopped replications. + +# Arguments +- `replication_config_arn`: The Amazon Resource Name of the replication to stop. + +""" +function stop_replication( + ReplicationConfigArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return database_migration_service( + "StopReplication", + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_replication( + ReplicationConfigArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return database_migration_service( + "StopReplication", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReplicationConfigArn" => ReplicationConfigArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_replication_task(replication_task_arn) stop_replication_task(replication_task_arn, params::Dict{String,<:Any}) diff --git a/src/services/datasync.jl b/src/services/datasync.jl index dd491b6198..1c137b4462 100644 --- a/src/services/datasync.jl +++ b/src/services/datasync.jl @@ -13,7 +13,8 @@ DataSync Discovery to collect information about. 
# Arguments - `agent_arns`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that - connects to and reads from your on-premises storage system's management interface. + connects to and reads from your on-premises storage system's management interface. You can + only specify one ARN. - `client_token`: Specifies a client token to make sure requests with this API operation are idempotent. If you don't specify a client token, DataSync generates one for you automatically. @@ -131,49 +132,32 @@ end create_agent(activation_key) create_agent(activation_key, params::Dict{String,<:Any}) -Activates an DataSync agent that you have deployed in your storage environment. The -activation process associates your agent with your account. In the activation process, you -specify information such as the Amazon Web Services Region that you want to activate the -agent in. You activate the agent in the Amazon Web Services Region where your target -locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web -Services Region. You can activate the agent in a VPC (virtual private cloud) or provide the -agent access to a VPC endpoint so you can run tasks without going over the public internet. -You can use an agent for more than one location. If a task uses multiple agents, all of -them need to have status AVAILABLE for the task to run. If you use multiple agents for a -source location, the status of all the agents must be AVAILABLE for the task to run. -Agents are automatically updated by Amazon Web Services on a regular basis, using a -mechanism that ensures minimal interruption to your tasks. +Activates an DataSync agent that you've deployed in your storage environment. The +activation process associates the agent with your Amazon Web Services account. If you +haven't deployed an agent yet, see the following topics to learn more: Agent +requirements Create an agent If you're transferring between Amazon Web Services +storage services, you don't need a DataSync agent. # Arguments -- `activation_key`: Your agent activation key. You can get the activation key either by - sending an HTTP GET request with redirects that enable you to get the agent IP address - (port 80). Alternatively, you can get it from the DataSync console. The redirect URL - returned in the response provides you the activation key for your agent in the query string - parameter activationKey. It might also include other activation-related parameters; - however, these are merely defaults. The arguments you pass to this API call determine the - actual configuration of your agent. For more information, see Activating an Agent in the - DataSync User Guide. +- `activation_key`: Specifies your DataSync agent's activation key. If you don't have an + activation key, see Activate your agent. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AgentName"`: The name you configured for your agent. This value is a text reference - that is used to identify the agent in the console. -- `"SecurityGroupArns"`: The ARNs of the security groups used to protect your data transfer - task subnets. See SecurityGroupArns. -- `"SubnetArns"`: The Amazon Resource Names (ARNs) of the subnets in which DataSync will - create elastic network interfaces for each data transfer task. The agent that runs a task - must be private. When you start a task that is associated with an agent created in a VPC, - or one that has access to an IP address in a VPC, then the task is also private. 
In this - case, DataSync creates four network interfaces for each task in your subnet. For a data - transfer to work, the agent must be able to route to all these four network interfaces. -- `"Tags"`: The key-value pair that represents the tag that you want to associate with the - agent. The value can be an empty string. This value helps you manage, filter, and search - for your agents. Valid characters for key and value are letters, spaces, and numbers - representable in UTF-8 format, and the following special characters: + - = . _ : / @. -- `"VpcEndpointId"`: The ID of the VPC (virtual private cloud) endpoint that the agent has - access to. This is the client-side VPC endpoint, also called a PrivateLink. If you don't - have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service Configuration in the - Amazon VPC User Guide. VPC endpoint ID looks like this: vpce-01234d5aff67890e1. +- `"AgentName"`: Specifies a name for your agent. You can see this name in the DataSync + console. +- `"SecurityGroupArns"`: Specifies the Amazon Resource Name (ARN) of the security group + that protects your task's network interfaces when using a virtual private cloud (VPC) + endpoint. You can only specify one ARN. +- `"SubnetArns"`: Specifies the ARN of the subnet where you want to run your DataSync task + when using a VPC endpoint. This is the subnet where DataSync creates and manages the + network interfaces for your transfer. You can only specify one ARN. +- `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon + Web Services resources. We recommend creating at least one tag for your agent. +- `"VpcEndpointId"`: Specifies the ID of the VPC endpoint that you want your agent to + connect to. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1. The VPC + endpoint you use must include the DataSync service name (for example, + com.amazonaws.us-east-2.datasync). """ function create_agent(ActivationKey; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -198,6 +182,83 @@ function create_agent( ) end +""" + create_location_azure_blob(agent_arns, authentication_type, container_url) + create_location_azure_blob(agent_arns, authentication_type, container_url, params::Dict{String,<:Any}) + +Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync can use as a +transfer source or destination. Before you begin, make sure you know how DataSync accesses +Azure Blob Storage and works with access tiers and blob types. You also need a DataSync +agent that can connect to your container. + +# Arguments +- `agent_arns`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that can + connect with your Azure Blob Storage container. You can specify more than one agent. For + more information, see Using multiple agents for your transfer. +- `authentication_type`: Specifies the authentication method DataSync uses to access your + Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS). +- `container_url`: Specifies the URL of the Azure Blob Storage container involved in your + transfer. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessTier"`: Specifies the access tier that you want your objects or files transferred + into. This only applies when using the location as a transfer destination. For more + information, see Access tiers. 
+- `"BlobType"`: Specifies the type of blob that you want your objects or files to be when + transferring them into Azure Blob Storage. Currently, DataSync only supports moving data + into Azure Blob Storage as block blobs. For more information on blob types, see the Azure + Blob Storage documentation. +- `"SasConfiguration"`: Specifies the SAS configuration that allows DataSync to access your + Azure Blob Storage. +- `"Subdirectory"`: Specifies path segments if you want to limit your transfer to a virtual + directory in your container (for example, /my/images). +- `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon + Web Services resources. We recommend creating at least a name tag for your transfer + location. +""" +function create_location_azure_blob( + AgentArns, + AuthenticationType, + ContainerUrl; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "CreateLocationAzureBlob", + Dict{String,Any}( + "AgentArns" => AgentArns, + "AuthenticationType" => AuthenticationType, + "ContainerUrl" => ContainerUrl, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_location_azure_blob( + AgentArns, + AuthenticationType, + ContainerUrl, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "CreateLocationAzureBlob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "AgentArns" => AgentArns, + "AuthenticationType" => AuthenticationType, + "ContainerUrl" => ContainerUrl, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_location_efs(ec2_config, efs_filesystem_arn) create_location_efs(ec2_config, efs_filesystem_arn, params::Dict{String,<:Any}) @@ -466,9 +527,9 @@ Creates an endpoint for an Amazon FSx for Windows File Server file system. connections from within itself, do one of the following: Configure the security group to allow it to communicate within itself. Choose a different security group that can communicate with the mount target's security group. -- `user`: Specifies the user who has the permissions to access files and folders in the - file system. For information about choosing a user name that ensures sufficient permissions - to files, folders, and metadata, see user. +- `user`: Specifies the user who has the permissions to access files, folders, and metadata + in your file system. For information about choosing a user with sufficient permissions, see + Required permissions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -621,39 +682,38 @@ end create_location_nfs(on_prem_config, server_hostname, subdirectory) create_location_nfs(on_prem_config, server_hostname, subdirectory, params::Dict{String,<:Any}) -Defines a file system on a Network File System (NFS) server that can be read from or -written to. +Creates an endpoint for an Network File System (NFS) file server that DataSync can use for +a data transfer. # Arguments -- `on_prem_config`: Contains a list of Amazon Resource Names (ARNs) of agents that are used - to connect to an NFS server. If you are copying data to or from your Snowcone device, see - NFS Server on Snowcone for more information. -- `server_hostname`: The name of the NFS server. This value is the IP address or Domain - Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this - hostname to mount the NFS server in a network. 
If you are copying data to or from your - Snowcone device, see NFS Server on Snowcone for more information. This name must either be - DNS-compliant or must be an IP version 4 (IPv4) address. -- `subdirectory`: The subdirectory in the NFS file system that is used to read data from - the NFS source location or write data to the NFS destination. The NFS path should be a path - that's exported by the NFS server, or a subdirectory of that path. The path should be such - that it can be mounted by other NFS clients in your network. To see all the paths exported - by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access - to your server. You can specify any directory that appears in the results, and any - subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos - authentication. To transfer all the data in the folder you specified, DataSync needs to - have permissions to read all the data. To ensure this, either configure the NFS export with - no_root_squash, or ensure that the permissions for all of the files that you want DataSync - allow read access for all users. Doing either enables the agent to read the files. For the - agent to access directories, you must additionally enable all execute access. If you are - copying data to or from your Snowcone device, see NFS Server on Snowcone for more - information. For information about NFS export configuration, see 18.7. The /etc/exports - Configuration File in the Red Hat Enterprise Linux documentation. +- `on_prem_config`: Specifies the Amazon Resource Names (ARNs) of agents that DataSync uses + to connect to your NFS file server. If you are copying data to or from your Snowcone + device, see NFS Server on Snowcone for more information. +- `server_hostname`: Specifies the IP address or domain name of your NFS file server. An + agent that is installed on-premises uses this hostname to mount the NFS server in a + network. If you are copying data to or from your Snowcone device, see NFS Server on + Snowcone for more information. You must specify be an IP version 4 address or Domain Name + System (DNS)-compliant name. +- `subdirectory`: Specifies the subdirectory in the NFS file server that DataSync transfers + to or from. The NFS path should be a path that's exported by the NFS server, or a + subdirectory of that path. The path should be such that it can be mounted by other NFS + clients in your network. To see all the paths exported by your NFS server, run \"showmount + -e nfs-server-name\" from an NFS client that has access to your server. You can specify any + directory that appears in the results, and any subdirectory of that directory. Ensure that + the NFS export is accessible without Kerberos authentication. To transfer all the data in + the folder you specified, DataSync needs to have permissions to read all the data. To + ensure this, either configure the NFS export with no_root_squash, or ensure that the + permissions for all of the files that you want DataSync allow read access for all users. + Doing either enables the agent to read the files. For the agent to access directories, you + must additionally enable all execute access. If you are copying data to or from your + Snowcone device, see NFS Server on Snowcone for more information. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MountOptions"`: The NFS mount options that DataSync can use to mount your NFS share. 
-- `"Tags"`: The key-value pair that represents the tag that you want to add to the - location. The value can be an empty string. We recommend using tags to name your resources. +- `"MountOptions"`: Specifies the mount options that DataSync can use to mount your NFS + share. +- `"Tags"`: Specifies labels that help you categorize, filter, and search for your Amazon + Web Services resources. We recommend creating at least a name tag for your location. """ function create_location_nfs( OnPremConfig, @@ -717,11 +777,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys required to authenticate with the object storage server. - `"SecretKey"`: Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server. -- `"ServerCertificate"`: Specifies a certificate to authenticate with an object storage - system that uses a private or self-signed certificate authority (CA). You must specify a - Base64-encoded .pem file (for example, file:///home/user/.ssh/storage_sys_certificate.pem). - The certificate can be up to 32768 bytes (before Base64 encoding). To use this parameter, - configure ServerProtocol to HTTPS. +- `"ServerCertificate"`: Specifies a file with the certificates that are used to sign the + object storage server's certificate (for example, + file:///home/user/.ssh/storage_sys_certificate.pem). The file you specify must include the + following: The certificate of the signing certificate authority (CA) Any intermediate + certificates base64 encoding A .pem extension The file can be up to 32768 bytes + (before base64 encoding). To use this parameter, configure ServerProtocol to HTTPS. - `"ServerPort"`: Specifies the port that your object storage server accepts inbound network traffic on (for example, port 443). - `"ServerProtocol"`: Specifies the protocol that your object storage server uses to @@ -839,8 +900,9 @@ end create_location_smb(agent_arns, password, server_hostname, subdirectory, user) create_location_smb(agent_arns, password, server_hostname, subdirectory, user, params::Dict{String,<:Any}) -Creates an endpoint for a Server Message Block (SMB) file server that DataSync can access -for a transfer. For more information, see Creating an SMB location. +Creates an endpoint for a Server Message Block (SMB) file server that DataSync can use for +a data transfer. Before you begin, make sure that you understand how DataSync accesses an +SMB file server. # Arguments - `agent_arns`: Specifies the DataSync agent (or agents) which you want to connect to your @@ -1165,6 +1227,43 @@ function describe_discovery_job( ) end +""" + describe_location_azure_blob(location_arn) + describe_location_azure_blob(location_arn, params::Dict{String,<:Any}) + +Provides details about how an DataSync transfer location for Microsoft Azure Blob Storage +is configured. + +# Arguments +- `location_arn`: Specifies the Amazon Resource Name (ARN) of your Azure Blob Storage + transfer location. 
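+
+# Example
+A minimal sketch; the location ARN below is a placeholder:
+
+    describe_location_azure_blob(
+        "arn:aws:datasync:us-east-1:123456789012:location/loc-EXAMPLE",  # placeholder ARN
+    )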
+ +""" +function describe_location_azure_blob( + LocationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return datasync( + "DescribeLocationAzureBlob", + Dict{String,Any}("LocationArn" => LocationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_location_azure_blob( + LocationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "DescribeLocationAzureBlob", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("LocationArn" => LocationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_location_efs(location_arn) describe_location_efs(location_arn, params::Dict{String,<:Any}) @@ -1698,10 +1797,10 @@ end describe_task(task_arn) describe_task(task_arn, params::Dict{String,<:Any}) -Returns metadata about a task. +Provides information about an DataSync transfer task. # Arguments -- `task_arn`: The Amazon Resource Name (ARN) of the task to describe. +- `task_arn`: Specifies the Amazon Resource Name (ARN) of the transfer task. """ function describe_task(TaskArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1727,10 +1826,11 @@ end describe_task_execution(task_execution_arn) describe_task_execution(task_execution_arn, params::Dict{String,<:Any}) -Returns detailed metadata about a task that is being executed. +Provides information about an DataSync transfer task that's running. # Arguments -- `task_execution_arn`: The Amazon Resource Name (ARN) of the task that is being executed. +- `task_execution_arn`: Specifies the Amazon Resource Name (ARN) of the transfer task + that's running. """ function describe_task_execution( @@ -2395,6 +2495,61 @@ function update_discovery_job( ) end +""" + update_location_azure_blob(location_arn) + update_location_azure_blob(location_arn, params::Dict{String,<:Any}) + +Modifies some configurations of the Microsoft Azure Blob Storage transfer location that +you're using with DataSync. + +# Arguments +- `location_arn`: Specifies the ARN of the Azure Blob Storage transfer location that you're + updating. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AccessTier"`: Specifies the access tier that you want your objects or files transferred + into. This only applies when using the location as a transfer destination. For more + information, see Access tiers. +- `"AgentArns"`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that can + connect with your Azure Blob Storage container. You can specify more than one agent. For + more information, see Using multiple agents for your transfer. +- `"AuthenticationType"`: Specifies the authentication method DataSync uses to access your + Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS). +- `"BlobType"`: Specifies the type of blob that you want your objects or files to be when + transferring them into Azure Blob Storage. Currently, DataSync only supports moving data + into Azure Blob Storage as block blobs. For more information on blob types, see the Azure + Blob Storage documentation. +- `"SasConfiguration"`: Specifies the SAS configuration that allows DataSync to access your + Azure Blob Storage. +- `"Subdirectory"`: Specifies path segments if you want to limit your transfer to a virtual + directory in your container (for example, /my/images). 
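+
+# Example
+A minimal sketch; the location ARN is a placeholder and the subdirectory value is taken
+from the example above:
+
+    update_location_azure_blob(
+        "arn:aws:datasync:us-east-1:123456789012:location/loc-EXAMPLE",  # placeholder ARN
+        Dict("Subdirectory" => "/my/images"),
+    )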
+""" +function update_location_azure_blob( + LocationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return datasync( + "UpdateLocationAzureBlob", + Dict{String,Any}("LocationArn" => LocationArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_location_azure_blob( + LocationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datasync( + "UpdateLocationAzureBlob", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("LocationArn" => LocationArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_location_hdfs(location_arn) update_location_hdfs(location_arn, params::Dict{String,<:Any}) @@ -2469,27 +2624,26 @@ Updates some of the parameters of a previously created location for Network File NFS. # Arguments -- `location_arn`: The Amazon Resource Name (ARN) of the NFS location to update. +- `location_arn`: Specifies the Amazon Resource Name (ARN) of the NFS location that you + want to update. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MountOptions"`: - `"OnPremConfig"`: -- `"Subdirectory"`: The subdirectory in the NFS file system that is used to read data from - the NFS source location or write data to the NFS destination. The NFS path should be a path - that's exported by the NFS server, or a subdirectory of that path. The path should be such - that it can be mounted by other NFS clients in your network. To see all the paths exported - by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access - to your server. You can specify any directory that appears in the results, and any - subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos - authentication. To transfer all the data in the folder that you specified, DataSync must - have permissions to read all the data. To ensure this, either configure the NFS export with - no_root_squash, or ensure that the files you want DataSync to access have permissions that - allow read access for all users. Doing either option enables the agent to read the files. - For the agent to access directories, you must additionally enable all execute access. If - you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more - information. For information about NFS export configuration, see 18.7. The /etc/exports - Configuration File in the Red Hat Enterprise Linux documentation. +- `"Subdirectory"`: Specifies the subdirectory in your NFS file system that DataSync uses + to read from or write to during a transfer. The NFS path should be exported by the NFS + server, or a subdirectory of that path. The path should be such that it can be mounted by + other NFS clients in your network. To see all the paths exported by your NFS server, run + \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can + specify any directory that appears in the results, and any subdirectory of that directory. + Ensure that the NFS export is accessible without Kerberos authentication. To transfer all + the data in the folder that you specified, DataSync must have permissions to read all the + data. To ensure this, either configure the NFS export with no_root_squash, or ensure that + the files you want DataSync to access have permissions that allow read access for all + users. Doing either option enables the agent to read the files. 
For the agent to access + directories, you must additionally enable all execute access. If you are copying data to or + from your Snowcone device, see NFS Server on Snowcone for more information. """ function update_location_nfs(LocationArn; aws_config::AbstractAWSConfig=global_aws_config()) return datasync( @@ -2644,7 +2798,7 @@ with DataSync Discovery. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AgentArns"`: Specifies the Amazon Resource Name (ARN) of the DataSync agent that - connects to and reads your on-premises storage system. + connects to and reads your on-premises storage system. You can only specify one ARN. - `"CloudWatchLogGroupArn"`: Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging discovery job events. - `"Credentials"`: Specifies the user name and password for accessing your on-premises diff --git a/src/services/devops_guru.jl b/src/services/devops_guru.jl index 99dd0e64f4..3138592fa4 100644 --- a/src/services/devops_guru.jl +++ b/src/services/devops_guru.jl @@ -11,16 +11,12 @@ using AWS.UUIDs Adds a notification channel to DevOps Guru. A notification channel is used to notify you about important DevOps Guru events, such as when an insight is generated. If you use an Amazon SNS topic in another account, you must attach a policy to it that grants DevOps Guru -permission to it notifications. DevOps Guru adds the required policy on your behalf to send -notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS -topics. For more information, see Permissions for cross account Amazon SNS topics. If you -use an Amazon SNS topic in another account, you must attach a policy to it that grants -DevOps Guru permission to it notifications. DevOps Guru adds the required policy on your -behalf to send notifications using Amazon SNS in your account. For more information, see -Permissions for cross account Amazon SNS topics. If you use an Amazon SNS topic that is -encrypted by an Amazon Web Services Key Management Service customer-managed key (CMK), then -you must add permissions to the CMK. For more information, see Permissions for Amazon Web -Services KMS–encrypted Amazon SNS topics. +permission to send it notifications. DevOps Guru adds the required policy on your behalf to +send notifications using Amazon SNS in your account. DevOps Guru only supports standard SNS +topics. For more information, see Permissions for Amazon SNS topics. If you use an Amazon +SNS topic that is encrypted by an Amazon Web Services Key Management Service +customer-managed key (CMK), then you must add permissions to the CMK. For more information, +see Permissions for Amazon Web Services KMS–encrypted Amazon SNS topics. # Arguments - `config`: A NotificationChannelConfig object that specifies what type of notification diff --git a/src/services/docdb.jl b/src/services/docdb.jl index a85fcf8b16..18584a632b 100644 --- a/src/services/docdb.jl +++ b/src/services/docdb.jl @@ -1947,6 +1947,10 @@ configuration parameters by specifying these parameters and the new values in th # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are + allowed. Constraints: You must allow major version upgrades when specifying a value for the + EngineVersion parameter that is a different major version than the DB cluster's current + version. 
- `"ApplyImmediately"`: A value that specifies whether the changes in this request and any pending changes are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cluster. If this parameter is set to false, @@ -1970,7 +1974,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted. - `"EngineVersion"`: The version number of the database engine to which you want to - upgrade. Modifying engine version is not supported on Amazon DocumentDB. + upgrade. Changing this parameter results in an outage. The change is applied during the + next maintenance window unless ApplyImmediately is enabled. To list all of the available + engine versions for Amazon DocumentDB use the following command: aws docdb + describe-db-engine-versions --engine docdb --query \"DBEngineVersions[].EngineVersion\" - `"MasterUserPassword"`: The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@). Constraints: Must contain from 8 to 100 characters. diff --git a/src/services/dynamodb.jl b/src/services/dynamodb.jl index dc524e7e66..d5192998a0 100644 --- a/src/services/dynamodb.jl +++ b/src/services/dynamodb.jl @@ -618,6 +618,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys from the small network and processing overhead of receiving a larger response. No read capacity units are consumed. The ReturnValues parameter is used by several DynamoDB operations; however, DeleteItem does not recognize any values other than NONE or ALL_OLD. +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for a DeleteItem operation that failed a condition check. There is no additional + cost associated with requesting a return value aside from the small network and processing + overhead of receiving a larger response. No read capacity units are consumed. """ function delete_item(Key, TableName; aws_config::AbstractAWSConfig=global_aws_config()) return dynamodb( @@ -808,9 +812,8 @@ end describe_endpoints() describe_endpoints(params::Dict{String,<:Any}) -Returns the regional endpoint information. This action must be included in your VPC -endpoint policies, or access to the DescribeEndpoints API will be denied. For more -information on policy permissions, please see Internetwork traffic privacy. +Returns the regional endpoint information. For more information on policy permissions, +please see Internetwork traffic privacy. """ function describe_endpoints(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1288,6 +1291,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys statement response. - `"Parameters"`: The parameters for the PartiQL statement, if any. - `"ReturnConsumedCapacity"`: +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for an ExecuteStatement operation that failed a condition check. There is no + additional cost associated with requesting a return value aside from the small network and + processing overhead of receiving a larger response. No read capacity units are consumed. """ function execute_statement(Statement; aws_config::AbstractAWSConfig=global_aws_config()) return dynamodb( @@ -1895,6 +1902,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys response. No read capacity units are consumed. The ReturnValues parameter is used by several DynamoDB operations; however, PutItem does not recognize any values other than NONE or ALL_OLD. +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for a PutItem operation that failed a condition check. There is no additional + cost associated with requesting a return value aside from the small network and processing + overhead of receiving a larger response. No read capacity units are consumed. """ function put_item(Item, TableName; aws_config::AbstractAWSConfig=global_aws_config()) return dynamodb( @@ -2254,22 +2265,32 @@ end The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a -FilterExpression operation. If the total number of scanned items exceeds the maximum -dataset size limit of 1 MB, the scan stops and results are returned to the user as a -LastEvaluatedKey value to continue the scan in a subsequent operation. The results also -include the number of items exceeding the limit. A scan can result in no table data meeting -the filter criteria. A single Scan operation reads up to the maximum number of items set -(if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to -the results using FilterExpression. If LastEvaluatedKey is present in the response, you -need to paginate the result set. For more information, see Paginating the Results in the -Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for -faster performance on a large table or secondary index, applications can request a parallel -Scan operation by providing the Segment and TotalSegments parameters. For more information, -see Parallel Scan in the Amazon DynamoDB Developer Guide. Scan uses eventually consistent -reads when accessing the data in a table; therefore, the result set might not include the -changes to data in the table immediately before the operation began. If you need a -consistent copy of the data, as of the time that the Scan begins, you can set the -ConsistentRead parameter to true. +FilterExpression operation. If the total size of scanned items exceeds the maximum dataset +size limit of 1 MB, the scan completes and results are returned to the user. The +LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to +continue the scan in a subsequent operation. Each scan response also includes number of +items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, +a scan result can result in no items meeting the criteria and the Count will result in +zero. If you did not use a FilterExpression in the scan request, then Count is the same as +ScannedCount. Count and ScannedCount only return the count of items specific to a single +scan request and, unless the table is less than 1MB, do not represent the total number of +items in the table. A single Scan operation first reads up to the maximum number of items +set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any +filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present +in the response, pagination is required to complete the full table scan. For more +information, see Paginating the Results in the Amazon DynamoDB Developer Guide. 
Scan +operations proceed sequentially; however, for faster performance on a large table or +secondary index, applications can request a parallel Scan operation by providing the +Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon +DynamoDB Developer Guide. By default, a Scan uses eventually consistent reads when +accessing the items in a table. Therefore, the results from an eventually consistent Scan +may not include the latest item changes at the time the scan iterates through each item in +the table. If you require a strongly consistent read of each item as the scan iterates +through the items in the table, you can set the ConsistentRead parameter to true. Strong +consistency only relates to the consistency of the read at the item level. DynamoDB does +not provide snapshot isolation for a scan operation when the ConsistentRead parameter is +set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan +see a consistent snapshot of the table when the scan operation was requested. # Arguments - `table_name`: The name of the table containing the requested items; or, if you provide @@ -2956,6 +2977,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys operation. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed. The values returned are strongly consistent. +- `"ReturnValuesOnConditionCheckFailure"`: An optional parameter that returns the item + attributes for an UpdateItem operation that failed a condition check. There is no + additional cost associated with requesting a return value aside from the small network and + processing overhead of receiving a larger response. No read capacity units are consumed. - `"UpdateExpression"`: An expression that defines one or more attributes to be updated, the action to be performed on them, and new values for them. The following action values are available for UpdateExpression. SET - Adds one or more attributes and values to an diff --git a/src/services/ebs.jl b/src/services/ebs.jl index 713052390c..fc47935585 100644 --- a/src/services/ebs.jl +++ b/src/services/ebs.jl @@ -10,7 +10,10 @@ using AWS.UUIDs Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks -to a snapshot after it has been completed. +to a snapshot after it has been completed. You should always retry requests that receive +server (5xx) error responses, and ThrottlingException and RequestThrottledException client +error responses. For more information see Error retries in the Amazon Elastic Compute Cloud +User Guide. # Arguments - `snapshot_id`: The ID of the snapshot. @@ -71,7 +74,10 @@ end get_snapshot_block(block_index, block_token, snapshot_id) get_snapshot_block(block_index, block_token, snapshot_id, params::Dict{String,<:Any}) -Returns the data in a block in an Amazon Elastic Block Store snapshot. +Returns the data in a block in an Amazon Elastic Block Store snapshot. You should always +retry requests that receive server (5xx) error responses, and ThrottlingException and +RequestThrottledException client error responses. For more information see Error retries in +the Amazon Elastic Compute Cloud User Guide. # Arguments - `block_index`: The block index of the block in which to read the data. 
A block index is a @@ -120,7 +126,10 @@ end list_changed_blocks(second_snapshot_id, params::Dict{String,<:Any}) Returns information about the blocks that are different between two Amazon Elastic Block -Store snapshots of the same volume/snapshot lineage. +Store snapshots of the same volume/snapshot lineage. You should always retry requests that +receive server (5xx) error responses, and ThrottlingException and RequestThrottledException +client error responses. For more information see Error retries in the Amazon Elastic +Compute Cloud User Guide. # Arguments - `second_snapshot_id`: The ID of the second snapshot to use for the comparison. The @@ -171,7 +180,10 @@ end list_snapshot_blocks(snapshot_id) list_snapshot_blocks(snapshot_id, params::Dict{String,<:Any}) -Returns information about the blocks in an Amazon Elastic Block Store snapshot. +Returns information about the blocks in an Amazon Elastic Block Store snapshot. You should +always retry requests that receive server (5xx) error responses, and ThrottlingException +and RequestThrottledException client error responses. For more information see Error +retries in the Amazon Elastic Compute Cloud User Guide. # Arguments - `snapshot_id`: The ID of the snapshot from which to get block indexes and block tokens. @@ -217,7 +229,10 @@ end Writes a block of data to a snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state. Data written to a -snapshot must be aligned with 512-KiB sectors. +snapshot must be aligned with 512-KiB sectors. You should always retry requests that +receive server (5xx) error responses, and ThrottlingException and RequestThrottledException +client error responses. For more information see Error retries in the Amazon Elastic +Compute Cloud User Guide. # Arguments - `block_data`: The data to write to the block. The block data is not signed as part of the @@ -309,7 +324,9 @@ end Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes. After creating the snapshot, use PutSnapshotBlock to write blocks of -data to the snapshot. +data to the snapshot. You should always retry requests that receive server (5xx) error +responses, and ThrottlingException and RequestThrottledException client error responses. +For more information see Error retries in the Amazon Elastic Compute Cloud User Guide. # Arguments - `volume_size`: The size of the volume, in GiB. The maximum size is 65536 GiB (64 TiB). diff --git a/src/services/ec2.jl b/src/services/ec2.jl index 2cd4b2809a..a351df4c04 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -415,8 +415,8 @@ function allocate_address( end """ - allocate_hosts(availability_zone, quantity) - allocate_hosts(availability_zone, quantity, params::Dict{String,<:Any}) + allocate_hosts(availability_zone) + allocate_hosts(availability_zone, params::Dict{String,<:Any}) Allocates a Dedicated Host to your account. At a minimum, specify the supported instance type or instance family, the Availability Zone in which to allocate the host, and the @@ -424,11 +424,16 @@ number of hosts to allocate. # Arguments - `availability_zone`: The Availability Zone in which to allocate the Dedicated Host. -- `quantity`: The number of Dedicated Hosts to allocate to your account with these - parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AssetId"`: The IDs of the Outpost hardware assets on which to allocate the Dedicated + Hosts. Targeting specific hardware assets on an Outpost can help to minimize latency + between your workloads. This parameter is supported only if you specify OutpostArn. If you + are allocating the Dedicated Hosts in a Region, omit this parameter. If you specify this + parameter, you can omit Quantity. In this case, Amazon EC2 allocates a Dedicated Host on + each specified hardware asset. If you specify both AssetIds and Quantity, then the value + for Quantity must be equal to the number of asset IDs specified. - `"HostMaintenance"`: Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide. - `"HostRecovery"`: Indicates whether to enable or disable host recovery for the Dedicated @@ -440,7 +445,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys type only, omit this parameter and specify InstanceType instead. You cannot specify InstanceFamily and InstanceType in the same request. - `"OutpostArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services Outpost on - which to allocate the Dedicated Host. + which to allocate the Dedicated Host. If you specify OutpostArn, you can optionally specify + AssetIds. If you are allocating the Dedicated Host in a Region, omit this parameter. - `"TagSpecification"`: The tags to apply to the Dedicated Host during creation. - `"autoPlacement"`: Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance @@ -453,20 +459,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance type only. If you want the Dedicated Hosts to support multiple instance types in a specific instance family, omit this parameter and specify InstanceFamily instead. You cannot specify InstanceType and InstanceFamily in the same request. +- `"quantity"`: The number of Dedicated Hosts to allocate to your account with these + parameters. If you are allocating the Dedicated Hosts on an Outpost, and you specify + AssetIds, you can omit this parameter. In this case, Amazon EC2 allocates a Dedicated Host + on each specified hardware asset. If you specify both AssetIds and Quantity, then the value + that you specify for Quantity must be equal to the number of asset IDs specified. """ -function allocate_hosts( - availabilityZone, quantity; aws_config::AbstractAWSConfig=global_aws_config() -) +function allocate_hosts(availabilityZone; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( "AllocateHosts", - Dict{String,Any}("availabilityZone" => availabilityZone, "quantity" => quantity); + Dict{String,Any}("availabilityZone" => availabilityZone); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function allocate_hosts( availabilityZone, - quantity, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -474,11 +482,7 @@ function allocate_hosts( "AllocateHosts", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "availabilityZone" => availabilityZone, "quantity" => quantity - ), - params, + _merge, Dict{String,Any}("availabilityZone" => availabilityZone), params ), ); aws_config=aws_config, @@ -752,10 +756,10 @@ end assign_private_nat_gateway_address(nat_gateway_id, params::Dict{String,<:Any}) Assigns one or more private IPv4 addresses to a private NAT gateway. 
For more information, -see Work with NAT gateways in the Amazon Virtual Private Cloud User Guide. +see Work with NAT gateways in the Amazon VPC User Guide. # Arguments -- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -913,7 +917,7 @@ any existing instances and all new instances that you launch in that VPC use the You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance. For more -information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide. +information, see DHCP options sets in the Amazon VPC User Guide. # Arguments - `dhcp_options_id`: The ID of the DHCP options set, or default to associate no DHCP @@ -1180,15 +1184,15 @@ end associate_nat_gateway_address(allocation_id, nat_gateway_id, params::Dict{String,<:Any}) Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT -gateway. For more information, see Work with NAT gateways in the Amazon Virtual Private -Cloud User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT -gateway. You can increase the limit by requesting a quota adjustment. For more information, -see Elastic IP address quotas in the Amazon Virtual Private Cloud User Guide. +gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By +default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can +increase the limit by requesting a quota adjustment. For more information, see Elastic IP +address quotas in the Amazon VPC User Guide. # Arguments - `allocation_id`: The allocation IDs of EIPs that you want to associate with your NAT gateway. -- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1239,7 +1243,7 @@ to your VPC with a route table in your VPC. This association causes traffic from or gateway to be routed according to the routes in the route table. The action returns an association ID, which you need in order to disassociate the route table later. A route table can be associated with multiple subnets. For more information, see Route tables in -the Amazon Virtual Private Cloud User Guide. +the Amazon VPC User Guide. # Arguments - `route_table_id`: The ID of the route table. @@ -1567,8 +1571,8 @@ Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool provisioned through bring your own IP addresses (BYOIP). The IPv6 CIDR block size is fixed at /56. You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR block. For more information about associating CIDR -blocks with your VPC and applicable restrictions, see VPC and subnet sizing in the Amazon -Virtual Private Cloud User Guide. +blocks with your VPC and applicable restrictions, see IP addressing for your VPCs and +subnets in the Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. @@ -1623,22 +1627,20 @@ end attach_classic_link_vpc(security_group_id, instance_id, vpc_id) attach_classic_link_vpc(security_group_id, instance_id, vpc_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. 
We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Links an EC2-Classic instance to a ClassicLink-enabled VPC through one or more -of the VPC's security groups. You cannot link an EC2-Classic instance to more than one VPC -at a time. You can only link an instance that's in the running state. An instance is -automatically unlinked from a VPC when it's stopped - you can link it to the VPC again when -you restart it. After you've linked an instance, you cannot change the VPC security groups -that are associated with it. To change the security groups, you must first unlink the -instance, and then link it again. Linking your instance to a VPC is sometimes referred to -as attaching your instance. + This action is deprecated. Links an EC2-Classic instance to a ClassicLink-enabled VPC +through one or more of the VPC security groups. You cannot link an EC2-Classic instance to +more than one VPC at a time. You can only link an instance that's in the running state. An +instance is automatically unlinked from a VPC when it's stopped - you can link it to the +VPC again when you restart it. After you've linked an instance, you cannot change the VPC +security groups that are associated with it. To change the security groups, you must first +unlink the instance, and then link it again. Linking your instance to a VPC is sometimes +referred to as attaching your instance. # Arguments -- `security_group_id`: The ID of one or more of the VPC's security groups. You cannot - specify security groups from a different VPC. -- `instance_id`: The ID of an EC2-Classic instance to link to the ClassicLink-enabled VPC. -- `vpc_id`: The ID of a ClassicLink-enabled VPC. +- `security_group_id`: The IDs of the security groups. You cannot specify security groups + from a different VPC. +- `instance_id`: The ID of the EC2-Classic instance. +- `vpc_id`: The ID of the ClassicLink-enabled VPC. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1690,8 +1692,8 @@ end attach_internet_gateway(internet_gateway_id, vpc_id, params::Dict{String,<:Any}) Attaches an internet gateway or a virtual private gateway to a VPC, enabling connectivity -between the internet and the VPC. For more information about your VPC and internet gateway, -see the Amazon Virtual Private Cloud User Guide. +between the internet and the VPC. For more information, see Internet gateways in the Amazon +VPC User Guide. # Arguments - `internet_gateway_id`: The ID of the internet gateway. @@ -2043,16 +2045,16 @@ end authorize_security_group_egress(group_id) authorize_security_group_egress(group_id, params::Dict{String,<:Any}) -[VPC only] Adds the specified outbound (egress) rules to a security group for use with a -VPC. An outbound rule permits instances to send traffic to the specified IPv4 or IPv6 CIDR -address ranges, or to the instances that are associated with the specified source security -groups. When specifying an outbound rule for your security group in a VPC, the -IpPermissions must include a destination for the traffic. You specify a protocol for each -rule (for example, TCP). For the TCP and UDP protocols, you must also specify the -destination port or port range. For the ICMP protocol, you must also specify the ICMP type -and code. You can use -1 for the type or code to mean all types or all codes. Rule changes -are propagated to affected instances as quickly as possible. 
However, a small delay might -occur. For information about VPC security group quotas, see Amazon VPC quotas. +Adds the specified outbound (egress) rules to a security group for use with a VPC. An +outbound rule permits instances to send traffic to the specified IPv4 or IPv6 CIDR address +ranges, or to the instances that are associated with the specified source security groups. +When specifying an outbound rule for your security group in a VPC, the IpPermissions must +include a destination for the traffic. You specify a protocol for each rule (for example, +TCP). For the TCP and UDP protocols, you must also specify the destination port or port +range. For the ICMP protocol, you must also specify the ICMP type and code. You can use -1 +for the type or code to mean all types or all codes. Rule changes are propagated to +affected instances as quickly as possible. However, a small delay might occur. For +information about VPC security group quotas, see Amazon VPC quotas. # Arguments - `group_id`: The ID of the security group. @@ -2109,9 +2111,7 @@ and UDP, you must also specify the destination port or port range. For ICMP/ICMP must also specify the ICMP/ICMPv6 type and code. You can use -1 to mean all types or all codes. Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur. For more information about VPC security group -quotas, see Amazon VPC quotas. We are retiring EC2-Classic. We recommend that you migrate -from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in -the Amazon Elastic Compute Cloud User Guide. +quotas, see Amazon VPC quotas. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2126,22 +2126,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"GroupId"`: The ID of the security group. You must specify either the security group ID or the security group name in the request. For security groups in a nondefault VPC, you must specify the security group ID. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You must - specify either the security group ID or the security group name in the request. For - security groups in a nondefault VPC, you must specify the security group ID. +- `"GroupName"`: [Default VPC] The name of the security group. You must specify either the + security group ID or the security group name in the request. For security groups in a + nondefault VPC, you must specify the security group ID. - `"IpPermissions"`: The sets of IP permissions. - `"IpProtocol"`: The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). - To specify icmpv6, use a set of IP permissions. [VPC only] Use -1 to specify all protocols. - If you specify -1 or a protocol other than tcp, udp, or icmp, traffic on all ports is - allowed, regardless of any ports you specify. Alternatively, use a set of IP permissions to - specify multiple rules and a description for the rule. -- `"SourceSecurityGroupName"`: [EC2-Classic, default VPC] The name of the source security - group. You can't specify this parameter in combination with the following parameters: the - CIDR IP address range, the start of the port range, the IP protocol, and the end of the - port range. Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with - a specific IP protocol and port range, use a set of IP permissions instead. 
For EC2-VPC, - the source security group must be in the same VPC. -- `"SourceSecurityGroupOwnerId"`: [nondefault VPC] The Amazon Web Services account ID for + To specify icmpv6, use a set of IP permissions. Use -1 to specify all protocols. If you + specify -1 or a protocol other than tcp, udp, or icmp, traffic on all ports is allowed, + regardless of any ports you specify. Alternatively, use a set of IP permissions to specify + multiple rules and a description for the rule. +- `"SourceSecurityGroupName"`: [Default VPC] The name of the source security group. You + can't specify this parameter in combination with the following parameters: the CIDR IP + address range, the start of the port range, the IP protocol, and the end of the port range. + Creates rules that grant full ICMP, UDP, and TCP access. To create a rule with a specific + IP protocol and port range, use a set of IP permissions instead. The source security group + must be in the same VPC. +- `"SourceSecurityGroupOwnerId"`: [Nondefault VPC] The Amazon Web Services account ID for the source security group, if the source security group is in a different account. You can't specify this parameter in combination with the following parameters: the CIDR IP address range, the IP protocol, the start of the port range, and the end of the port range. @@ -3539,7 +3539,7 @@ end Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more -information, see Creating a default subnet in the Amazon Virtual Private Cloud User Guide. +information, see Create a default subnet in the Amazon VPC User Guide. # Arguments - `availability_zone`: The Availability Zone in which to create the default subnet. @@ -3586,15 +3586,9 @@ end Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default -VPC and default subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify -the components of the default VPC yourself. If you deleted your previous default VPC, you -can create a default VPC. You cannot have more than one default VPC per Region. If your -account supports EC2-Classic, you cannot use this action to create a default VPC in a -Region that supports EC2-Classic. If you want a default VPC in a Region that supports -EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that -possible?\" in the Default VPCs FAQ. We are retiring EC2-Classic. We recommend that you -migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a -VPC in the Amazon Elastic Compute Cloud User Guide. +VPCs in the Amazon VPC User Guide. You cannot specify the components of the default VPC +yourself. If you deleted your previous default VPC, you can create a default VPC. You +cannot have more than one default VPC per Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3641,7 +3635,7 @@ node types, see RFC 2132. Your VPC automatically starts out with a set of DHCP that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more -information, see DHCP options sets in the Amazon Virtual Private Cloud User Guide. 
+information, see DHCP options sets in the Amazon VPC User Guide. # Arguments - `dhcp_configuration`: A DHCP configuration option. @@ -3726,9 +3720,11 @@ end create_fleet(target_capacity_specification, item) create_fleet(target_capacity_specification, item, params::Dict{String,<:Any}) -Launches an EC2 Fleet. You can create a single EC2 Fleet that includes multiple launch -specifications that vary by instance type, AMI, Availability Zone, or subnet. For more -information, see EC2 Fleet in the Amazon EC2 User Guide. +Creates an EC2 Fleet that contains the configuration information for On-Demand Instances +and Spot Instances. Instances are launched immediately if there is available capacity. A +single EC2 Fleet can include multiple launch specifications that vary by instance type, +AMI, Availability Zone, or subnet. For more information, see EC2 Fleet in the Amazon EC2 +User Guide. # Arguments - `target_capacity_specification`: The number of units to request. @@ -3966,18 +3962,11 @@ end create_image(instance_id, name, params::Dict{String,<:Any}) Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running -or stopped. By default, when Amazon EC2 creates the new AMI, it reboots the instance so -that it can take snapshots of the attached volumes while data is at rest, in order to -ensure a consistent state. You can set the NoReboot parameter to true in the API request, -or use the --no-reboot option in the CLI to prevent Amazon EC2 from shutting down and -rebooting the instance. If you choose to bypass the shutdown and reboot process by setting -the NoReboot parameter to true in the API request, or by using the --no-reboot option in -the CLI, we can't guarantee the file system integrity of the created image. If you -customized your instance with instance store volumes or Amazon EBS volumes in addition to -the root device volume, the new AMI contains block device mapping information for those -volumes. When you launch an instance from this new AMI, the instance automatically launches -with those additional volumes. For more information, see Create an Amazon EBS-backed Linux -AMI in the Amazon Elastic Compute Cloud User Guide. +or stopped. If you customized your instance with instance store volumes or Amazon EBS +volumes in addition to the root device volume, the new AMI contains block device mapping +information for those volumes. When you launch an instance from this new AMI, the instance +automatically launches with those additional volumes. For more information, see Create an +Amazon EBS-backed Linux AMI in the Amazon Elastic Compute Cloud User Guide. # Arguments - `instance_id`: The ID of the instance. @@ -4001,14 +3990,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"noReboot"`: By default, when Amazon EC2 creates the new AMI, it reboots the instance so - that it can take snapshots of the attached volumes while data is at rest, in order to - ensure a consistent state. You can set the NoReboot parameter to true in the API request, - or use the --no-reboot option in the CLI to prevent Amazon EC2 from shutting down and - rebooting the instance. 
If you choose to bypass the shutdown and reboot process by setting - the NoReboot parameter to true in the API request, or by using the --no-reboot option in - the CLI, we can't guarantee the file system integrity of the created image. Default: false - (follow standard reboot process) +- `"noReboot"`: Indicates whether or not the instance should be automatically rebooted + before creating the image. Specify one of the following values: true - The instance is + not rebooted before creating the image. This creates crash-consistent snapshots that + include only the data that has been written to the volumes at the time the snapshots are + created. Buffered data and data in memory that has not yet been written to the volumes is + not included in the snapshots. false - The instance is rebooted before creating the + image. This ensures that all buffered data and data in memory is written to the volumes + before the snapshots are created. Default: false """ function create_image(instanceId, name; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -4041,8 +4030,8 @@ end create_instance_connect_endpoint(subnet_id, params::Dict{String,<:Any}) Creates an EC2 Instance Connect Endpoint. An EC2 Instance Connect Endpoint allows you to -connect to a resource, without requiring the resource to have a public IPv4 address. For -more information, see Connect to your resources without requiring a public IPv4 address +connect to an instance, without requiring the instance to have a public IPv4 address. For +more information, see Connect to your instances without requiring a public IPv4 address using EC2 Instance Connect Endpoint in the Amazon EC2 User Guide. # Arguments @@ -4150,9 +4139,9 @@ end create_instance_export_task(export_to_s3, instance_id, target_environment, params::Dict{String,<:Any}) Exports a running or stopped instance to an Amazon S3 bucket. For information about the -supported operating systems, image formats, and known limitations for the types of -instances you can export, see Exporting an instance as a VM Using VM Import/Export in the -VM Import/Export User Guide. +prerequisites for your Amazon S3 bucket, supported operating systems, image formats, and +known limitations for the types of instances you can export, see Exporting an instance as a +VM Using VM Import/Export in the VM Import/Export User Guide. # Arguments - `export_to_s3`: The format and location for an export instance task. @@ -4212,8 +4201,8 @@ end create_internet_gateway(params::Dict{String,<:Any}) Creates an internet gateway for use with a VPC. After creating the internet gateway, you -attach it to a VPC using AttachInternetGateway. For more information about your VPC and -internet gateway, see the Amazon Virtual Private Cloud User Guide. +attach it to a VPC using AttachInternetGateway. For more information, see Internet gateways +in the Amazon VPC User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4954,7 +4943,7 @@ private communication is routed across VPCs and on-premises networks through a t gateway or virtual private gateway. Common use cases include running large workloads behind a small pool of allowlisted IPv4 addresses, preserving private IPv4 addresses, and communicating between overlapping networks. For more information, see NAT gateways in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. # Arguments - `subnet_id`: The ID of the subnet in which to create the NAT gateway. 
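For orientation, a hedged sketch of the simplest create_nat_gateway call, following the two-method convention used throughout these bindings; the subnet ID and address are placeholders, and the optional key comes from the list in the hunk that follows:

    # Create a NAT gateway in the given subnet, optionally pinning its private IPv4 address.
    create_nat_gateway(
        "subnet-0123456789abcdef0",
        Dict{String,Any}("PrivateIpAddress" => "10.0.1.25"),
    )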
@@ -4975,16 +4964,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - `"PrivateIpAddress"`: The private IPv4 address to assign to the NAT gateway. If you don't provide an address, a private IPv4 address will be automatically assigned. -- `"SecondaryAllocationId"`: Secondary EIP allocation IDs. For more information about - secondary addresses, see Create a NAT gateway in the Amazon Virtual Private Cloud User - Guide. +- `"SecondaryAllocationId"`: Secondary EIP allocation IDs. For more information, see Create + a NAT gateway in the Amazon VPC User Guide. - `"SecondaryPrivateIpAddress"`: Secondary private IPv4 addresses. For more information - about secondary addresses, see Create a NAT gateway in the Amazon Virtual Private Cloud - User Guide. + about secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide. - `"SecondaryPrivateIpAddressCount"`: [Private NAT gateway only] The number of secondary private IPv4 addresses you want to assign to the NAT gateway. For more information about - secondary addresses, see Create a NAT gateway in the Amazon Virtual Private Cloud User - Guide. + secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide. - `"TagSpecification"`: The tags to assign to the NAT gateway. """ function create_nat_gateway(SubnetId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -5020,7 +5006,7 @@ end Creates a network ACL in a VPC. Network ACLs provide an optional layer of security (in addition to security groups) for the instances in your VPC. For more information, see -Network ACLs in the Amazon Virtual Private Cloud User Guide. +Network ACLs in the Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. @@ -5065,7 +5051,7 @@ and not number them one right after the other (for example, 101, 102, 103, ...). it easier to add a rule between existing ones without having to renumber the rules. After you add an entry, you can't modify it; you must either replace it, or create an entry and delete the old one. For more information about network ACLs, see Network ACLs in the Amazon -Virtual Private Cloud User Guide. +VPC User Guide. # Arguments - `egress`: Indicates whether this is an egress rule (rule is applied to traffic leaving @@ -5700,8 +5686,8 @@ route table includes the following two IPv4 routes: 192.0.2.0/24 (goes to som 192.0.2.0/28 (goes to some target B) Both routes apply to the traffic destined for 192.0.2.3. However, the second route in the list covers a smaller number of IP addresses and is therefore more specific, so we use that route to determine where to target the -traffic. For more information about route tables, see Route tables in the Amazon Virtual -Private Cloud User Guide. +traffic. For more information about route tables, see Route tables in the Amazon VPC User +Guide. # Arguments - `route_table_id`: The ID of the route table for the route. @@ -5764,7 +5750,7 @@ end Creates a route table for the specified VPC. After you create a route table, you can add routes and associate the table with a subnet. For more information, see Route tables in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. @@ -5803,32 +5789,26 @@ Creates a security group. A security group acts as a virtual firewall for your i control inbound and outbound traffic. 
For more information, see Amazon EC2 security groups in the Amazon Elastic Compute Cloud User Guide and Security groups for your VPC in the Amazon Virtual Private Cloud User Guide. When you create a security group, you specify a -friendly name of your choice. You can have a security group for use in EC2-Classic with the -same name as a security group for use in a VPC. However, you can't have two security groups -for use in EC2-Classic with the same name or two security groups for use in a VPC with the -same name. You have a default security group for use in EC2-Classic and a default security -group for use in your VPC. If you don't specify a security group when you launch an -instance, the instance is launched into the appropriate default security group. A default -security group includes a default rule that grants instances unrestricted network access to -each other. You can add or remove rules from your security groups using -AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, RevokeSecurityGroupIngress, -and RevokeSecurityGroupEgress. For more information about VPC security group limits, see -Amazon VPC Limits. We are retiring EC2-Classic. We recommend that you migrate from -EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC in the -Amazon Elastic Compute Cloud User Guide. +friendly name of your choice. You can't have two security groups for the same VPC with the +same name. You have a default security group for use in your VPC. If you don't specify a +security group when you launch an instance, the instance is launched into the appropriate +default security group. A default security group includes a default rule that grants +instances unrestricted network access to each other. You can add or remove rules from your +security groups using AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress, +RevokeSecurityGroupIngress, and RevokeSecurityGroupEgress. For more information about VPC +security group limits, see Amazon VPC Limits. # Arguments - `group_description`: A description for the security group. Constraints: Up to 255 - characters in length Constraints for EC2-Classic: ASCII characters Constraints for EC2-VPC: - a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!* + characters in length Valid characters: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!* - `group_name`: The name of the security group. Constraints: Up to 255 characters in - length. Cannot start with sg-. Constraints for EC2-Classic: ASCII characters Constraints - for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!* + length. Cannot start with sg-. Valid characters: a-z, A-Z, 0-9, spaces, and + ._-:/()#,@[]+=&;{}!* # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"TagSpecification"`: The tags to assign to the security group. -- `"VpcId"`: [EC2-VPC] The ID of the VPC. Required for EC2-VPC. +- `"VpcId"`: The ID of the VPC. Required for a nondefault VPC. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -6111,7 +6091,7 @@ an IPv6 subnet is a /64 netmask. If you add more than one subnet to a VPC, they' in a star topology with a logical router in the middle. When you stop an instance in a subnet, it retains its private IPv4 address. 
It's therefore possible to have a subnet with no running instances (they're all stopped), but no remaining IP addresses available. For -more information, see Subnets in the Amazon Virtual Private Cloud User Guide. +more information, see Subnets in the Amazon VPC User Guide. # Arguments - `vpc_id`: The ID of the VPC. @@ -6122,9 +6102,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet. To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about - the Regions that support Local Zones, see Available Regions in the Amazon Elastic Compute - Cloud User Guide. To create a subnet in an Outpost, set this value to the Availability Zone - for the Outpost and specify the Outpost ARN. + the Regions that support Local Zones, see Local Zones locations. To create a subnet in an + Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost + ARN. - `"AvailabilityZoneId"`: The AZ ID or the Local Zone ID of the subnet. - `"CidrBlock"`: The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you @@ -7427,7 +7407,8 @@ Guide. For more information, see Create an Amazon EBS volume in the Amazon Elast Cloud User Guide. # Arguments -- `availability_zone`: The Availability Zone in which to create the volume. +- `availability_zone`: The ID of the Availability Zone in which to create the volume. For + example, us-east-1a. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7519,16 +7500,16 @@ end create_vpc() create_vpc(params::Dict{String,<:Any}) -Creates a VPC with the specified CIDR blocks. For more information, see VPC CIDR blocks in -the Amazon Virtual Private Cloud User Guide. You can optionally request an IPv6 CIDR block -for the VPC. You can request an Amazon-provided IPv6 CIDR block from Amazon's pool of IPv6 -addresses, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through -bring your own IP addresses (BYOIP). By default, each instance that you launch in the VPC -has the default DHCP options, which include only a default DNS server that we provide -(AmazonProvidedDNS). For more information, see DHCP option sets in the Amazon Virtual -Private Cloud User Guide. You can specify the instance tenancy value for the VPC when you -create it. You can't change this value for the VPC after you create it. For more -information, see Dedicated Instances in the Amazon Elastic Compute Cloud User Guide. +Creates a VPC with the specified CIDR blocks. For more information, see IP addressing for +your VPCs and subnets in the Amazon VPC User Guide. You can optionally request an IPv6 CIDR +block for the VPC. You can request an Amazon-provided IPv6 CIDR block from Amazon's pool of +IPv6 addresses, or an IPv6 CIDR block from an IPv6 address pool that you provisioned +through bring your own IP addresses (BYOIP). By default, each instance that you launch in +the VPC has the default DHCP options, which include only a default DNS server that we +provide (AmazonProvidedDNS). For more information, see DHCP option sets in the Amazon VPC +User Guide. You can specify the instance tenancy value for the VPC when you create it. You +can't change this value for the VPC after you create it. 
For more information, see +Dedicated Instances in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7582,14 +7563,14 @@ end create_vpc_endpoint(service_name, vpc_id) create_vpc_endpoint(service_name, vpc_id, params::Dict{String,<:Any}) -Creates a VPC endpoint for a specified service. An endpoint enables you to create a private -connection between your VPC and the service. The service may be provided by Amazon Web -Services, an Amazon Web Services Marketplace Partner, or another Amazon Web Services -account. For more information, see the Amazon Web Services PrivateLink Guide. +Creates a VPC endpoint. A VPC endpoint provides a private connection between the specified +VPC and the specified endpoint service. You can use an endpoint service provided by Amazon +Web Services, an Amazon Web Services Marketplace Partner, or another Amazon Web Services +account. For more information, see the Amazon Web Services PrivateLink User Guide. # Arguments -- `service_name`: The service name. -- `vpc_id`: The ID of the VPC for the endpoint. +- `service_name`: The name of the endpoint service. +- `vpc_id`: The ID of the VPC. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -9740,17 +9721,15 @@ end delete_security_group(params::Dict{String,<:Any}) Deletes a security group. If you attempt to delete a security group that is associated with -an instance, or is referenced by another security group, the operation fails with -InvalidGroup.InUse in EC2-Classic or DependencyViolation in EC2-VPC. We are retiring -EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, -see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. +an instance or network interface or is referenced by another security group, the operation +fails with DependencyViolation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"GroupId"`: The ID of the security group. Required for a nondefault VPC. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You can specify - either the security group name or the security group ID. For security groups in a - nondefault VPC, you must specify the security group ID. +- `"GroupId"`: The ID of the security group. +- `"GroupName"`: [Default VPC] The name of the security group. You can specify either the + security group name or the security group ID. For security groups in a nondefault VPC, you + must specify the security group ID. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -11581,18 +11560,14 @@ end describe_account_attributes(params::Dict{String,<:Any}) Describes attributes of your Amazon Web Services account. The following are the supported -account attributes: supported-platforms: Indicates whether your account can launch -instances into EC2-Classic and EC2-VPC, or only into EC2-VPC. default-vpc: The ID of the -default VPC for your account, or none. max-instances: This attribute is no longer -supported. The returned value does not reflect your actual vCPU limit for running On-Demand -Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic -Compute Cloud User Guide. 
vpc-max-security-groups-per-interface: The maximum number of -security groups that you can assign to a network interface. max-elastic-ips: The maximum -number of Elastic IP addresses that you can allocate for use with EC2-Classic. -vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for -use with EC2-VPC. We are retiring EC2-Classic on August 15, 2022. We recommend that you -migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a -VPC in the Amazon EC2 User Guide. +account attributes: default-vpc: The ID of the default VPC for your account, or none. +max-instances: This attribute is no longer supported. The returned value does not reflect +your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand +Instance Limits in the Amazon Elastic Compute Cloud User Guide. max-elastic-ips: The +maximum number of Elastic IP addresses that you can allocate. supported-platforms: This +attribute is deprecated. vpc-max-elastic-ips: The maximum number of Elastic IP addresses +that you can allocate. vpc-max-security-groups-per-interface: The maximum number of +security groups that you can assign to a network interface. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -12103,25 +12078,21 @@ end describe_classic_link_instances() describe_classic_link_instances(params::Dict{String,<:Any}) -Describes one or more of your linked EC2-Classic instances. This request only returns -information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot use -this request to return information about other instances. We are retiring EC2-Classic. We -recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate -from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Describes one or more of your linked EC2-Classic instances. +This request only returns information about EC2-Classic instances linked to a VPC through +ClassicLink. You cannot use this request to return information about other instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. group-id - The ID of a VPC security group that's - associated with the instance. instance-id - The ID of the instance. tag:<key> - - The key/value combination of a tag assigned to the resource. Use the tag key in the filter - name and the tag value as the filter value. For example, to find all resources that have a - tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA - for the filter value. tag-key - The key of a tag assigned to the resource. Use this - filter to find all resources assigned a tag with a specific key, regardless of the tag - value. vpc-id - The ID of the VPC to which the instance is linked. vpc-id - The ID of - the VPC that the instance is linked to. -- `"InstanceId"`: One or more instance IDs. Must be instances linked to a VPC through - ClassicLink. +- `"Filter"`: The filters. group-id - The ID of a VPC security group that's associated + with the instance. instance-id - The ID of the instance. tag:<key> - The + key/value combination of a tag assigned to the resource. Use the tag key in the filter name + and the tag value as the filter value. 
For example, to find all resources that have a tag + with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for + the filter value. tag-key - The key of a tag assigned to the resource. Use this filter + to find all resources assigned a tag with a specific key, regardless of the tag value. + vpc-id - The ID of the VPC to which the instance is linked. +- `"InstanceId"`: The instance IDs. Must be instances linked to a VPC through ClassicLink. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -12500,21 +12471,21 @@ end describe_dhcp_options(params::Dict{String,<:Any}) Describes one or more of your DHCP options sets. For more information, see DHCP options -sets in the Amazon Virtual Private Cloud User Guide. +sets in the Amazon VPC User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DhcpOptionsId"`: The IDs of one or more DHCP options sets. Default: Describes all your DHCP options sets. -- `"Filter"`: One or more filters. dhcp-options-id - The ID of a DHCP options set. - key - The key for one of the options (for example, domain-name). value - The value for - one of the options. owner-id - The ID of the Amazon Web Services account that owns the - DHCP options set. tag:<key> - The key/value combination of a tag assigned to the - resource. Use the tag key in the filter name and the tag value as the filter value. For - example, to find all resources that have a tag with the key Owner and the value TeamA, - specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key - of a tag assigned to the resource. Use this filter to find all resources assigned a tag - with a specific key, regardless of the tag value. +- `"Filter"`: The filters. dhcp-options-id - The ID of a DHCP options set. key - The + key for one of the options (for example, domain-name). value - The value for one of the + options. owner-id - The ID of the Amazon Web Services account that owns the DHCP options + set. tag:<key> - The key/value combination of a tag assigned to the resource. Use + the tag key in the filter name and the tag value as the filter value. For example, to find + all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for + the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to + the resource. Use this filter to find all resources assigned a tag with a specific key, + regardless of the tag value. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -12551,13 +12522,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"EgressOnlyInternetGatewayId"`: One or more egress-only internet gateway IDs. -- `"Filter"`: One or more filters. tag:<key> - The key/value combination of a tag - assigned to the resource. 
Use the tag key in the filter name and the tag value as the - filter value. For example, to find all resources that have a tag with the key Owner and the - value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. - tag-key - The key of a tag assigned to the resource. Use this filter to find all resources - assigned a tag with a specific key, regardless of the tag value. +- `"EgressOnlyInternetGatewayId"`: The IDs of the egress-only internet gateways. +- `"Filter"`: The filters. tag:<key> - The key/value combination of a tag assigned + to the resource. Use the tag key in the filter name and the tag value as the filter value. + For example, to find all resources that have a tag with the key Owner and the value TeamA, + specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key + of a tag assigned to the resource. Use this filter to find all resources assigned a tag + with a specific key, regardless of the tag value. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -13374,9 +13345,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys you specify self or your own Amazon Web Services account ID, AMIs shared with your account are returned. In addition, AMIs that are shared with the organization or OU of which you are member are also returned. If you specify all, all public AMIs are returned. -- `"Filter"`: The filters. architecture - The image architecture (i386 | x86_64 | - arm64). block-device-mapping.delete-on-termination - A Boolean value that indicates - whether the Amazon EBS volume is deleted on instance termination. +- `"Filter"`: The filters. architecture - The image architecture (i386 | x86_64 | arm64 + | x86_64_mac | arm64_mac). block-device-mapping.delete-on-termination - A Boolean value + that indicates whether the Amazon EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS volume. block-device-mapping.volume-size - The volume @@ -13883,11 +13854,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Filter"`: One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance - type (true | false). burstable-performance-supported - Indicates whether it is a - burstable performance instance type (true | false). current-generation - Indicates - whether this instance type is the latest generation instance type of an instance family - (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline - bandwidth performance for an EBS-optimized instance type, in Mbps. + type (true | false). burstable-performance-supported - Indicates whether the instance + type is a burstable performance T instance type (true | false). current-generation - + Indicates whether this instance type is the latest generation instance type of an instance + family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The + baseline bandwidth performance for an EBS-optimized instance type, in Mbps. 
ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput @@ -13929,8 +13900,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, \"25 - Gigabit\"). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | - x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. + Gigabit\"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported + (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported + (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM + version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | + i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in + GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot). supported-virtualization-type - The virtualization type (hvm | @@ -14131,9 +14106,9 @@ Describes one or more of your internet gateways. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. attachment.state - The current state of the - attachment between the gateway and the VPC (available). Present only if a VPC is attached. - attachment.vpc-id - The ID of an attached VPC. internet-gateway-id - The ID of the +- `"Filter"`: The filters. attachment.state - The current state of the attachment + between the gateway and the VPC (available). Present only if a VPC is attached. + attachment.vpc-id - The ID of an attached VPC. internet-gateway-id - The ID of the Internet gateway. owner-id - The ID of the Amazon Web Services account that owns the internet gateway. tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For @@ -14149,7 +14124,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"internetGatewayId"`: One or more internet gateway IDs. Default: Describes all your +- `"internetGatewayId"`: The IDs of the internet gateways. Default: Describes all your internet gateways. """ function describe_internet_gateways(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -14852,19 +14827,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"Filter"`: One or more filters. 
nat-gateway-id - The ID of the NAT gateway. state - - The state of the NAT gateway (pending | failed | available | deleting | deleted). - subnet-id - The ID of the subnet in which the NAT gateway resides. tag:<key> - The - key/value combination of a tag assigned to the resource. Use the tag key in the filter name - and the tag value as the filter value. For example, to find all resources that have a tag - with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for - the filter value. tag-key - The key of a tag assigned to the resource. Use this filter - to find all resources assigned a tag with a specific key, regardless of the tag value. +- `"Filter"`: The filters. nat-gateway-id - The ID of the NAT gateway. state - The + state of the NAT gateway (pending | failed | available | deleting | deleted). subnet-id + - The ID of the subnet in which the NAT gateway resides. tag:<key> - The key/value + combination of a tag assigned to the resource. Use the tag key in the filter name and the + tag value as the filter value. For example, to find all resources that have a tag with the + key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the + filter value. tag-key - The key of a tag assigned to the resource. Use this filter to + find all resources assigned a tag with a specific key, regardless of the tag value. vpc-id - The ID of the VPC in which the NAT gateway resides. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. -- `"NatGatewayId"`: One or more NAT gateway IDs. +- `"NatGatewayId"`: The IDs of the NAT gateways. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. """ @@ -14889,12 +14864,12 @@ end describe_network_acls(params::Dict{String,<:Any}) Describes one or more of your network ACLs. For more information, see Network ACLs in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. association.association-id - The ID of an association - ID for the ACL. association.network-acl-id - The ID of the network ACL involved in the +- `"Filter"`: The filters. association.association-id - The ID of an association ID for + the ACL. association.network-acl-id - The ID of the network ACL involved in the association. association.subnet-id - The ID of the subnet involved in the association. default - Indicates whether the ACL is the default network ACL for the VPC. entry.cidr - The IPv4 CIDR range specified in the entry. entry.icmp.code - The ICMP code specified @@ -14917,7 +14892,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. -- `"NetworkAclId"`: One or more network ACL IDs. Default: Describes all your network ACLs. +- `"NetworkAclId"`: The IDs of the network ACLs. Default: Describes all your network ACLs. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. 
- `"dryRun"`: Checks whether you have the required permissions for the action, without @@ -15753,12 +15728,12 @@ end Describes one or more of your route tables. Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID -for implicit associations. For more information, see Route tables in the Amazon Virtual -Private Cloud User Guide. +for implicit associations. For more information, see Route tables in the Amazon VPC User +Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. association.route-table-association-id - The ID of an +- `"Filter"`: The filters. association.route-table-association-id - The ID of an association ID for the route table. association.route-table-id - The ID of the route table involved in the association. association.subnet-id - The ID of the subnet involved in the association. association.main - Indicates whether the route table is the main @@ -15792,7 +15767,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. -- `"RouteTableId"`: One or more route table IDs. Default: Describes all your route tables. +- `"RouteTableId"`: The IDs of the route tables. Default: Describes all your route tables. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -15923,8 +15898,8 @@ end describe_security_group_references(item) describe_security_group_references(item, params::Dict{String,<:Any}) -[VPC only] Describes the VPCs on the other side of a VPC peering connection that are -referencing the security groups you've specified in this request. +Describes the VPCs on the other side of a VPC peering connection that are referencing the +security groups you've specified in this request. # Arguments - `item`: The IDs of the security groups in your account. @@ -16002,13 +15977,7 @@ end describe_security_groups() describe_security_groups(params::Dict{String,<:Any}) -Describes the specified security groups or all of your security groups. A security group is -for use with instances either in the EC2-Classic platform or in a specific VPC. For more -information, see Amazon EC2 security groups in the Amazon Elastic Compute Cloud User Guide -and Security groups for your VPC in the Amazon Virtual Private Cloud User Guide. We are -retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more -information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User -Guide. +Describes the specified security groups or all of your security groups. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -16050,10 +16019,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys VPC specified when the security group was created. - `"GroupId"`: The IDs of the security groups. Required for security groups in a nondefault VPC. Default: Describes all of your security groups. 
-- `"GroupName"`: [EC2-Classic and default VPC only] The names of the security groups. You - can specify either the security group name or the security group ID. For security groups in - a nondefault VPC, use the group-name filter to describe security groups by name. Default: - Describes all of your security groups. +- `"GroupName"`: [Default VPC] The names of the security groups. You can specify either the + security group name or the security group ID. Default: Describes all of your security + groups. - `"MaxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. This value can be between 5 and 1000. If this parameter is not specified, then all items are returned. For @@ -16568,10 +16536,10 @@ end describe_stale_security_groups(vpc_id) describe_stale_security_groups(vpc_id, params::Dict{String,<:Any}) -[VPC only] Describes the stale security group rules for security groups in a specified VPC. -Rules are stale when they reference a deleted security group in the same VPC or in a peer -VPC, or if they reference a security group in a peer VPC for which the VPC peering -connection has been deleted. +Describes the stale security group rules for security groups in a specified VPC. Rules are +stale when they reference a deleted security group in the same VPC or in a peer VPC, or if +they reference a security group in a peer VPC for which the VPC peering connection has been +deleted. # Arguments - `vpc_id`: The ID of the VPC. @@ -16659,25 +16627,25 @@ end describe_subnets() describe_subnets(params::Dict{String,<:Any}) -Describes one or more of your subnets. For more information, see Your VPC and subnets in -the Amazon Virtual Private Cloud User Guide. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. availability-zone - The Availability Zone for the - subnet. You can also use availabilityZone as the filter name. availability-zone-id - The - ID of the Availability Zone for the subnet. You can also use availabilityZoneId as the - filter name. available-ip-address-count - The number of IPv4 addresses in the subnet - that are available. cidr-block - The IPv4 CIDR block of the subnet. The CIDR block you - specify must exactly match the subnet's CIDR block for information to be returned for the - subnet. You can also use cidr or cidrBlock as the filter names. customer-owned-ipv4-pool - - The customer-owned IPv4 address pool associated with the subnet. default-for-az - - Indicates whether this is the default subnet for the Availability Zone (true | false). You - can also use defaultForAz as the filter name. enable-dns64 - Indicates whether DNS - queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic - IPv6 addresses for IPv4-only destinations. enable-lni-at-device-index - Indicates the - device position for local network interfaces in this subnet. For example, 1 indicates local - network interfaces in this subnet are the secondary network interface (eth1). +Describes one or more of your subnets. For more information, see Subnets in the Amazon VPC +User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filter"`: The filters. availability-zone - The Availability Zone for the subnet. You + can also use availabilityZone as the filter name. 
availability-zone-id - The ID of the + Availability Zone for the subnet. You can also use availabilityZoneId as the filter name. + available-ip-address-count - The number of IPv4 addresses in the subnet that are + available. cidr-block - The IPv4 CIDR block of the subnet. The CIDR block you specify + must exactly match the subnet's CIDR block for information to be returned for the subnet. + You can also use cidr or cidrBlock as the filter names. customer-owned-ipv4-pool - The + customer-owned IPv4 address pool associated with the subnet. default-for-az - Indicates + whether this is the default subnet for the Availability Zone (true | false). You can also + use defaultForAz as the filter name. enable-dns64 - Indicates whether DNS queries made + to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses + for IPv4-only destinations. enable-lni-at-device-index - Indicates the device position + for local network interfaces in this subnet. For example, 1 indicates local network + interfaces in this subnet are the secondary network interface (eth1). ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with the subnet. ipv6-cidr-block-association.association-id - An association ID for an IPv6 CIDR block associated with the subnet. ipv6-cidr-block-association.state - The state of an @@ -16709,7 +16677,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. -- `"SubnetId"`: One or more subnet IDs. Default: Describes all your subnets. +- `"SubnetId"`: The IDs of the subnets. Default: Describes all your subnets. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -17823,20 +17791,18 @@ end describe_vpc_classic_link() describe_vpc_classic_link(params::Dict{String,<:Any}) -Describes the ClassicLink status of one or more VPCs. We are retiring EC2-Classic. We -recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate -from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Describes the ClassicLink status of the specified VPCs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. is-classic-link-enabled - Whether the VPC is enabled - for ClassicLink (true | false). tag:<key> - The key/value combination of a tag +- `"Filter"`: The filters. is-classic-link-enabled - Whether the VPC is enabled for + ClassicLink (true | false). tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. -- `"VpcId"`: One or more VPCs for which you want to describe the ClassicLink status. +- `"VpcId"`: The VPCs for which you want to describe the ClassicLink status. 
- `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -17861,18 +17827,15 @@ end describe_vpc_classic_link_dns_support() describe_vpc_classic_link_dns_support(params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Describes the ClassicLink DNS support status of one or more VPCs. If enabled, -the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when -addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of -an instance in a VPC resolves to its private IP address when addressed from a linked -EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute -Cloud User Guide. + This action is deprecated. Describes the ClassicLink DNS support status of one or more +VPCs. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its private +IP address when addressed from an instance in the VPC to which it's linked. Similarly, the +DNS hostname of an instance in a VPC resolves to its private IP address when addressed from +a linked EC2-Classic instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"VpcIds"`: One or more VPC IDs. +- `"VpcIds"`: The IDs of the VPCs. - `"maxResults"`: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. @@ -18181,8 +18144,8 @@ Describes one or more of your VPC peering connections. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. accepter-vpc-info.cidr-block - The IPv4 CIDR block of - the accepter VPC. accepter-vpc-info.owner-id - The ID of the Amazon Web Services account +- `"Filter"`: The filters. accepter-vpc-info.cidr-block - The IPv4 CIDR block of the + accepter VPC. accepter-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the accepter VPC. accepter-vpc-info.vpc-id - The ID of the accepter VPC. expiration-time - The expiration date and time for the VPC peering connection. requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's VPC. @@ -18203,7 +18166,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. -- `"VpcPeeringConnectionId"`: One or more VPC peering connection IDs. Default: Describes +- `"VpcPeeringConnectionId"`: The IDs of the VPC peering connections. Default: Describes all your VPC peering connections. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required @@ -18237,10 +18200,10 @@ Describes one or more of your VPCs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: One or more filters. cidr - The primary IPv4 CIDR block of the VPC. 
The - CIDR block you specify must exactly match the VPC's CIDR block for information to be - returned for the VPC. Must contain the slash followed by one or two digits (for example, - /28). cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC. +- `"Filter"`: The filters. cidr - The primary IPv4 CIDR block of the VPC. The CIDR block + you specify must exactly match the VPC's CIDR block for information to be returned for the + VPC. Must contain the slash followed by one or two digits (for example, /28). + cidr-block-association.cidr-block - An IPv4 CIDR block associated with the VPC. cidr-block-association.association-id - The association ID for an IPv4 CIDR block associated with the VPC. cidr-block-association.state - The state of an IPv4 CIDR block associated with the VPC. dhcp-options-id - The ID of a set of DHCP options. @@ -18262,7 +18225,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Pagination. - `"NextToken"`: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. -- `"VpcId"`: One or more VPC IDs. Default: Describes all your VPCs. +- `"VpcId"`: The IDs of the VPCs. Default: Describes all your VPCs. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -18374,11 +18337,9 @@ end detach_classic_link_vpc(instance_id, vpc_id) detach_classic_link_vpc(instance_id, vpc_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Unlinks (detaches) a linked EC2-Classic instance from a VPC. After the -instance has been unlinked, the VPC security groups are no longer associated with it. An -instance is automatically unlinked from a VPC when it's stopped. + This action is deprecated. Unlinks (detaches) a linked EC2-Classic instance from a VPC. +After the instance has been unlinked, the VPC security groups are no longer associated with +it. An instance is automatically unlinked from a VPC when it's stopped. # Arguments - `instance_id`: The ID of the instance to unlink from the VPC. @@ -19106,10 +19067,8 @@ end disable_vpc_classic_link(vpc_id) disable_vpc_classic_link(vpc_id, params::Dict{String,<:Any}) -Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has -EC2-Classic instances linked to it. We are retiring EC2-Classic. We recommend that you -migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a -VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Disables ClassicLink for a VPC. You cannot disable ClassicLink +for a VPC that has EC2-Classic instances linked to it. # Arguments - `vpc_id`: The ID of the VPC. @@ -19143,12 +19102,10 @@ end disable_vpc_classic_link_dns_support() disable_vpc_classic_link_dns_support(params::Dict{String,<:Any}) -Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve to public IP -addresses when addressed between a linked EC2-Classic instance and instances in the VPC to -which it's linked. For more information, see ClassicLink in the Amazon Elastic Compute -Cloud User Guide. You must specify a VPC ID in the request. 
We are retiring EC2-Classic. -We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate -from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. + This action is deprecated. Disables ClassicLink DNS support for a VPC. If disabled, DNS +hostnames resolve to public IP addresses when addressed between a linked EC2-Classic +instance and instances in the VPC to which it's linked. You must specify a VPC ID in the +request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -19460,18 +19417,18 @@ end Disassociates secondary Elastic IP addresses (EIPs) from a public NAT gateway. You cannot disassociate your primary EIP. For more information, see Edit secondary IP address -associations in the Amazon Virtual Private Cloud User Guide. While disassociating is in -progress, you cannot associate/disassociate additional EIPs while the connections are being -drained. You are, however, allowed to delete the NAT gateway. An EIP will only be released -at the end of MaxDrainDurationSeconds. The EIPs stay associated and support the existing -connections but do not support any new connections (new connections are distributed across -the remaining associated EIPs). As the existing connections drain out, the EIPs (and the -corresponding private IPs mapped to them) get released. +associations in the Amazon VPC User Guide. While disassociating is in progress, you cannot +associate/disassociate additional EIPs while the connections are being drained. You are, +however, allowed to delete the NAT gateway. An EIP is released only at the end of +MaxDrainDurationSeconds. It stays associated and supports the existing connections but does +not support any new connections (new connections are distributed across the remaining +associated EIPs). As the existing connections drain out, the EIPs (and the corresponding +private IP addresses mapped to them) are released. # Arguments - `association_id`: The association IDs of EIPs that have been associated with the NAT gateway. -- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -19521,7 +19478,7 @@ end Disassociates a subnet or gateway from a route table. After you perform this action, the subnet no longer uses the routes in the route table. Instead, it uses the routes in the VPC's main route table. For more information about route tables, see Route tables in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. # Arguments - `association_id`: The association ID representing the current association between the @@ -20400,14 +20357,11 @@ end enable_vpc_classic_link(vpc_id) enable_vpc_classic_link(vpc_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Enables a VPC for ClassicLink. You can then link EC2-Classic instances to your -ClassicLink-enabled VPC to allow communication over private IP addresses. You cannot enable -your VPC for ClassicLink if any of your VPC route tables have existing routes for address -ranges within the 10.0.0.0/8 IP address range, excluding local routes for VPCs in the -10.0.0.0/16 and 10.1.0.0/16 IP address ranges. For more information, see ClassicLink in the -Amazon Elastic Compute Cloud User Guide. 
+ This action is deprecated. Enables a VPC for ClassicLink. You can then link EC2-Classic +instances to your ClassicLink-enabled VPC to allow communication over private IP addresses. +You cannot enable your VPC for ClassicLink if any of your VPC route tables have existing +routes for address ranges within the 10.0.0.0/8 IP address range, excluding local routes +for VPCs in the 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. # Arguments - `vpc_id`: The ID of the VPC. @@ -20441,14 +20395,11 @@ end enable_vpc_classic_link_dns_support() enable_vpc_classic_link_dns_support(params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, -the DNS hostname of a linked EC2-Classic instance resolves to its private IP address when -addressed from an instance in the VPC to which it's linked. Similarly, the DNS hostname of -an instance in a VPC resolves to its private IP address when addressed from a linked -EC2-Classic instance. For more information, see ClassicLink in the Amazon Elastic Compute -Cloud User Guide. You must specify a VPC ID in the request. + This action is deprecated. Enables a VPC to support DNS hostname resolution for +ClassicLink. If enabled, the DNS hostname of a linked EC2-Classic instance resolves to its +private IP address when addressed from an instance in the VPC to which it's linked. +Similarly, the DNS hostname of an instance in a VPC resolves to its private IP address when +addressed from a linked EC2-Classic instance. You must specify a VPC ID in the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -21125,6 +21076,8 @@ the template to do the following: Create a table in Athena that maps fields to log format Create a Lambda function that updates the table with new partitions on a daily, weekly, or monthly basis Create a table partitioned between two timestamps in the past Create a set of named queries in Athena that you can use to get started quickly +GetFlowLogsIntegrationTemplate does not support integration between Amazon Web Services +Transit Gateway Flow Logs and Amazon Athena. # Arguments - `config_delivery_s3_destination_arn`: To store the CloudFormation template in Amazon S3, @@ -22909,12 +22862,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys AMI. This parameter is only required if you want to use a non-default KMS key; if this parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set. The KMS key identifier may be provided in - any of the following formats: Key ID Key alias. The alias ARN contains the arn:aws:kms - namespace, followed by the Region of the key, the Amazon Web Services account ID of the key - owner, the alias namespace, and then the key alias. For example, - arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. ARN using key ID. The ID ARN - contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web - Services account ID of the key owner, the key namespace, and then the key ID. For example, + any of the following formats: Key ID Key alias ARN using key ID. 
The ID ARN contains + the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services + account ID of the key owner, the key namespace, and then the key ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias namespace, and then the @@ -22932,7 +22882,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Linux operating system. To use BYOL, you must have existing licenses with rights to use these licenses in a third party cloud, such as Amazon Web Services. For more information, see Prerequisites in the VM Import/Export User Guide. -- `"Platform"`: The operating system of the virtual machine. Valid values: Windows | Linux +- `"Platform"`: The operating system of the virtual machine. If you import a VM that is + compatible with Unified Extensible Firmware Interface (UEFI) using an EBS snapshot, you + must specify a value for the platform. Valid values: Windows | Linux - `"RoleName"`: The name of the role to use when not using the default role, 'vmimport'. - `"TagSpecification"`: The tags to apply to the import image task during creation. - `"UsageOperation"`: The usage operation value. For more information, see Licensing @@ -22953,12 +22905,14 @@ end import_instance(platform) import_instance(platform, params::Dict{String,<:Any}) -Creates an import instance task using metadata from the specified disk image. This API -action supports only single-volume VMs. To import multi-volume VMs, use ImportImage -instead. This API action is not supported by the Command Line Interface (CLI). For -information about using the Amazon EC2 CLI, which is deprecated, see Importing a VM to -Amazon EC2 in the Amazon EC2 CLI Reference PDF file. For information about the import -manifest referenced by this API action, see VM Import Manifest. + We recommend that you use the ImportImage API. For more information, see Importing a VM +as an image using VM Import/Export in the VM Import/Export User Guide. Creates an import +instance task using metadata from the specified disk image. This API action is not +supported by the Command Line Interface (CLI). For information about using the Amazon EC2 +CLI, which is deprecated, see Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference +PDF file. This API action supports only single-volume VMs. To import multi-volume VMs, use +ImportImage instead. For information about the import manifest referenced by this API +action, see VM Import Manifest. # Arguments - `platform`: The instance operating system. @@ -23075,12 +23029,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys snapshot. This parameter is only required if you want to use a non-default KMS key; if this parameter is not specified, the default KMS key for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set. The KMS key identifier may be provided in - any of the following formats: Key ID Key alias. The alias ARN contains the arn:aws:kms - namespace, followed by the Region of the key, the Amazon Web Services account ID of the key - owner, the alias namespace, and then the key alias. For example, - arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. ARN using key ID. 
The ID ARN - contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web - Services account ID of the key owner, the key namespace, and then the key ID. For example, + any of the following formats: Key ID Key alias ARN using key ID. The ID ARN contains + the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services + account ID of the key owner, the key namespace, and then the key ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed by the Region of the key, the Amazon Web Services account ID of the key owner, the alias namespace, and then the @@ -24398,7 +24349,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys this parameter is not specified, the existing state is maintained. If you specify a value of disabled, you cannot access your instance metadata. - `"HttpProtocolIpv6"`: Enables or disables the IPv6 endpoint for the instance metadata - service. This setting applies only if you have enabled the HTTP metadata endpoint. + service. Applies only if you enabled the HTTP metadata endpoint. - `"HttpPutResponseHopLimit"`: The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. If no parameter is specified, the existing state is maintained. Possible values: @@ -24470,14 +24421,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys placement groups, the instance must have a tenancy of default or dedicated. To remove an instance from a placement group, specify an empty string (\"\"). - `"HostResourceGroupArn"`: The ARN of the host resource group in which to place the - instance. + instance. The instance must have a tenancy of host to specify this parameter. - `"PartitionNumber"`: The number of the partition in which to place the instance. Valid only if the placement group strategy is set to partition. - `"affinity"`: The affinity setting for the instance. - `"hostId"`: The ID of the Dedicated Host with which to associate the instance. -- `"tenancy"`: The tenancy for the instance. For T3 instances, you can't change the - tenancy from dedicated to host, or from host to dedicated. Attempting to make one of these - unsupported tenancy changes results in the InvalidTenancy error code. +- `"tenancy"`: The tenancy for the instance. For T3 instances, you must launch the + instance on a Dedicated Host to use a tenancy of host. You can't change the tenancy from + host to dedicated or default. Attempting to make one of these unsupported tenancy changes + results in an InvalidRequest error code. """ function modify_instance_placement( instanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -26527,25 +26479,17 @@ end modify_vpc_peering_connection_options(vpc_peering_connection_id) modify_vpc_peering_connection_options(vpc_peering_connection_id, params::Dict{String,<:Any}) - We are retiring EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For -more information, see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud -User Guide. Modifies the VPC peering connection options on one side of a VPC peering -connection. You can do the following: Enable/disable communication over the peering -connection between an EC2-Classic instance that's linked to your VPC (using ClassicLink) -and instances in the peer VPC. 
Enable/disable communication over the peering connection -between instances in your VPC and an EC2-Classic instance that's linked to the peer VPC. -Enable/disable the ability to resolve public DNS hostnames to private IP addresses when -queried from instances in the peer VPC. If the peered VPCs are in the same Amazon Web -Services account, you can enable DNS resolution for queries from the local VPC. This -ensures that queries from the local VPC resolve to private IP addresses in the peer VPC. -This option is not available if the peered VPCs are in different Amazon Web Services -accounts or different Regions. For peered VPCs in different Amazon Web Services accounts, -each Amazon Web Services account owner must initiate a separate request to modify the -peering connection options. For inter-region peering connections, you must use the Region -for the requester VPC to modify the requester VPC peering options and the Region for the -accepter VPC to modify the accepter VPC peering options. To verify which VPCs are the -accepter and the requester for a VPC peering connection, use the -DescribeVpcPeeringConnections command. +Modifies the VPC peering connection options on one side of a VPC peering connection. If the +peered VPCs are in the same Amazon Web Services account, you can enable DNS resolution for +queries from the local VPC. This ensures that queries from the local VPC resolve to private +IP addresses in the peer VPC. This option is not available if the peered VPCs are in +different Amazon Web Services accounts or different Regions. For peered VPCs in different +Amazon Web Services accounts, each Amazon Web Services account owner must initiate a +separate request to modify the peering connection options. For inter-region peering +connections, you must use the Region for the requester VPC to modify the requester VPC +peering options and the Region for the accepter VPC to modify the accepter VPC peering +options. To verify which VPCs are the accepter and the requester for a VPC peering +connection, use the DescribeVpcPeeringConnections command. # Arguments - `vpc_peering_connection_id`: The ID of the VPC peering connection. @@ -26598,7 +26542,7 @@ tenancy attribute of a VPC to default only. You cannot change the instance tenan attribute to dedicated. After you modify the tenancy of the VPC, any new instances that you launch into the VPC have a tenancy of default, unless you specify otherwise during launch. The tenancy of any existing instances in the VPC is not affected. For more information, see -Dedicated Instances in the Amazon Elastic Compute Cloud User Guide. +Dedicated Instances in the Amazon EC2 User Guide. # Arguments - `instance_tenancy`: The instance tenancy attribute for the VPC. @@ -28110,8 +28054,7 @@ end Changes which network ACL a subnet is associated with. By default when you create a subnet, it's automatically associated with the default network ACL. For more information, see -Network ACLs in the Amazon Virtual Private Cloud User Guide. This is an idempotent -operation. +Network ACLs in the Amazon VPC User Guide. This is an idempotent operation. # Arguments - `association_id`: The ID of the current association between the original network ACL and @@ -28161,7 +28104,7 @@ end replace_network_acl_entry(egress, network_acl_id, protocol, rule_action, rule_number, params::Dict{String,<:Any}) Replaces an entry (rule) in a network ACL. For more information, see Network ACLs in the -Amazon Virtual Private Cloud User Guide. +Amazon VPC User Guide. 
# Arguments - `egress`: Indicates whether to replace the egress rule. Default: If no value is @@ -28247,7 +28190,7 @@ end Replaces an existing route within a route table in a VPC. You must specify either a destination CIDR block or a prefix list ID. You must also specify exactly one of the resources from the parameter list, or reset the local route to its default target. For more -information, see Route tables in the Amazon Virtual Private Cloud User Guide. +information, see Route tables in the Amazon VPC User Guide. # Arguments - `route_table_id`: The ID of the route table. @@ -28307,9 +28250,9 @@ end Changes the route table associated with a given subnet, internet gateway, or virtual private gateway in a VPC. After the operation completes, the subnet or gateway uses the routes in the new route table. For more information about route tables, see Route tables in -the Amazon Virtual Private Cloud User Guide. You can also use this operation to change -which table is the main route table in the VPC. Specify the main route table's association -ID and the route table ID of the new main route table. +the Amazon VPC User Guide. You can also use this operation to change which table is the +main route table in the VPC. Specify the main route table's association ID and the route +table ID of the new main route table. # Arguments - `association_id`: The association ID. @@ -29278,19 +29221,19 @@ end revoke_security_group_egress(group_id) revoke_security_group_egress(group_id, params::Dict{String,<:Any}) -[VPC only] Removes the specified outbound (egress) rules from a security group for EC2-VPC. -This action does not apply to security groups for use in EC2-Classic. You can specify rules -using either rule IDs or security group rule properties. If you use rule properties, the -values that you specify (for example, ports) must match the existing rule's values exactly. -Each rule has a protocol, from and to ports, and destination (CIDR range, security group, -or prefix list). For the TCP and UDP protocols, you must also specify the destination port -or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If -the security group rule has a description, you do not need to specify the description to -revoke the rule. [Default VPC] If the values you specify do not match the existing rule's -values, no error is returned, and the output describes the security group rules that were -not revoked. Amazon Web Services recommends that you describe the security group to verify -that the rules were removed. Rule changes are propagated to instances within the security -group as quickly as possible. However, a small delay might occur. +Removes the specified outbound (egress) rules from the specified security group. You can +specify rules using either rule IDs or security group rule properties. If you use rule +properties, the values that you specify (for example, ports) must match the existing rule's +values exactly. Each rule has a protocol, from and to ports, and destination (CIDR range, +security group, or prefix list). For the TCP and UDP protocols, you must also specify the +destination port or range of ports. For the ICMP protocol, you must also specify the ICMP +type and code. If the security group rule has a description, you do not need to specify the +description to revoke the rule. For a default VPC, if the values you specify do not match +the existing rule's values, no error is returned, and the output describes the security +group rules that were not revoked. 
Amazon Web Services recommends that you describe the +security group to verify that the rules were removed. Rule changes are propagated to +instances within the security group as quickly as possible. However, a small delay might +occur. # Arguments - `group_id`: The ID of the security group. @@ -29345,13 +29288,11 @@ Each rule has a protocol, from and to ports, and source (CIDR range, security gr prefix list). For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not need to specify the description to revoke -the rule. [EC2-Classic, default VPC] If the values you specify do not match the existing -rule's values, no error is returned, and the output describes the security group rules that -were not revoked. Amazon Web Services recommends that you describe the security group to -verify that the rules were removed. Rule changes are propagated to instances within the -security group as quickly as possible. However, a small delay might occur. We are retiring -EC2-Classic. We recommend that you migrate from EC2-Classic to a VPC. For more information, -see Migrate from EC2-Classic to a VPC in the Amazon Elastic Compute Cloud User Guide. +the rule. For a default VPC, if the values you specify do not match the existing rule's +values, no error is returned, and the output describes the security group rules that were +not revoked. Amazon Web Services recommends that you describe the security group to verify +that the rules were removed. Rule changes are propagated to instances within the security +group as quickly as possible. However, a small delay might occur. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -29359,28 +29300,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys source security group. - `"FromPort"`: If the protocol is TCP or UDP, this is the start of the port range. If the protocol is ICMP, this is the type number. A value of -1 indicates all ICMP types. -- `"GroupId"`: The ID of the security group. You must specify either the security group ID - or the security group name in the request. For security groups in a nondefault VPC, you - must specify the security group ID. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You must - specify either the security group ID or the security group name in the request. For - security groups in a nondefault VPC, you must specify the security group ID. +- `"GroupId"`: The ID of the security group. +- `"GroupName"`: [Default VPC] The name of the security group. You must specify either the + security group ID or the security group name in the request. For security groups in a + nondefault VPC, you must specify the security group ID. - `"IpPermissions"`: The sets of IP permissions. You can't specify a source security group and a CIDR IP address range in the same set of permissions. - `"IpProtocol"`: The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers). Use -1 to specify all. - `"SecurityGroupRuleId"`: The IDs of the security group rules. -- `"SourceSecurityGroupName"`: [EC2-Classic, default VPC] The name of the source security - group. You can't specify this parameter in combination with the following parameters: the - CIDR IP address range, the start of the port range, the IP protocol, and the end of the - port range. 
For EC2-VPC, the source security group must be in the same VPC. To revoke a - specific rule for an IP protocol and port range, use a set of IP permissions instead. -- `"SourceSecurityGroupOwnerId"`: [EC2-Classic] The Amazon Web Services account ID of the - source security group, if the source security group is in a different account. You can't - specify this parameter in combination with the following parameters: the CIDR IP address - range, the IP protocol, the start of the port range, and the end of the port range. To - revoke a specific rule for an IP protocol and port range, use a set of IP permissions - instead. +- `"SourceSecurityGroupName"`: [Default VPC] The name of the source security group. You + can't specify this parameter in combination with the following parameters: the CIDR IP + address range, the start of the port range, the IP protocol, and the end of the port range. + The source security group must be in the same VPC. To revoke a specific rule for an IP + protocol and port range, use a set of IP permissions instead. +- `"SourceSecurityGroupOwnerId"`: Not supported. - `"ToPort"`: If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP, this is the code. A value of -1 indicates all ICMP codes. - `"dryRun"`: Checks whether you have the required permissions for the action, without @@ -29476,19 +29410,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. - `"EnclaveOptions"`: Indicates whether the instance is enabled for Amazon Web Services - Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in + Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. -- `"HibernationOptions"`: Indicates whether an instance is enabled for hibernation. For - more information, see Hibernate your instance in the Amazon EC2 User Guide. You can't - enable hibernation and Amazon Web Services Nitro Enclaves on the same instance. +- `"HibernationOptions"`: Indicates whether an instance is enabled for hibernation. This + parameter is valid only if the instance meets the hibernation prerequisites. For more + information, see Hibernate your instance in the Amazon EC2 User Guide. You can't enable + hibernation and Amazon Web Services Nitro Enclaves on the same instance. - `"ImageId"`: The ID of the AMI. An AMI ID is required to launch an instance and must be specified here or in a launch template. - `"InstanceMarketOptions"`: The market (purchasing) option for the instances. For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop. - `"InstanceType"`: The instance type. For more information, see Instance types in the - Amazon EC2 User Guide. Default: m1.small + Amazon EC2 User Guide. - `"Ipv6Address"`: The IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a @@ -29514,7 +29449,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"Monitoring"`: Specifies whether detailed monitoring is enabled for the instance. - `"Placement"`: The placement for the instance. - `"PrivateDnsNameOptions"`: The options for the instance hostname. The default values are - inherited from the subnet. + inherited from the subnet. Applies only if creating a network interface, not attaching an + existing one. - `"RamdiskId"`: The ID of the RAM disk to select. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, go to the Amazon Web Services Resource Center and @@ -30383,16 +30319,16 @@ end Unassigns secondary private IPv4 addresses from a private NAT gateway. You cannot unassign your primary private IP. For more information, see Edit secondary IP address associations -in the Amazon Virtual Private Cloud User Guide. While unassigning is in progress, you -cannot assign/unassign additional IP addresses while the connections are being drained. You -are, however, allowed to delete the NAT gateway. A private IP address will only be released -at the end of MaxDrainDurationSeconds. The private IP addresses stay associated and support -the existing connections but do not support any new connections (new connections are -distributed across the remaining assigned private IP address). After the existing -connections drain out, the private IP addresses get released. +in the Amazon VPC User Guide. While unassigning is in progress, you cannot assign/unassign +additional IP addresses while the connections are being drained. You are, however, allowed +to delete the NAT gateway. A private IP address will only be released at the end of +MaxDrainDurationSeconds. The private IP addresses stay associated and support the existing +connections, but do not support any new connections (new connections are distributed across +the remaining assigned private IP address). After the existing connections drain out, the +private IP addresses are released. # Arguments -- `nat_gateway_id`: The NAT gateway ID. +- `nat_gateway_id`: The ID of the NAT gateway. - `private_ip_address`: The private IPv4 addresses you want to unassign. # Optional Parameters @@ -30481,10 +30417,10 @@ end update_security_group_rule_descriptions_egress() update_security_group_rule_descriptions_egress(params::Dict{String,<:Any}) -[VPC only] Updates the description of an egress (outbound) security group rule. You can -replace an existing description, or add a description to a rule that did not have one -previously. You can remove a description for a security group rule by omitting the -description parameter in the request. +Updates the description of an egress (outbound) security group rule. You can replace an +existing description, or add a description to a rule that did not have one previously. You +can remove a description for a security group rule by omitting the description parameter in +the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -30495,7 +30431,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys or the security group name in the request. For security groups in a nondefault VPC, you must specify the security group ID. - `"GroupName"`: [Default VPC] The name of the security group. You must specify either the - security group ID or the security group name in the request. + security group ID or the security group name. 
- `"IpPermissions"`: The IP permissions for the security group rule. You must specify either the IP permissions or the description. - `"SecurityGroupRuleDescription"`: The description for the egress security group rules. @@ -30538,13 +30474,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"GroupId"`: The ID of the security group. You must specify either the security group ID or the security group name in the request. For security groups in a nondefault VPC, you must specify the security group ID. -- `"GroupName"`: [EC2-Classic, default VPC] The name of the security group. You must - specify either the security group ID or the security group name in the request. For - security groups in a nondefault VPC, you must specify the security group ID. +- `"GroupName"`: [Default VPC] The name of the security group. You must specify either the + security group ID or the security group name. For security groups in a nondefault VPC, you + must specify the security group ID. - `"IpPermissions"`: The IP permissions for the security group rule. You must specify either IP permissions or a description. -- `"SecurityGroupRuleDescription"`: [VPC only] The description for the ingress security - group rules. You must specify either a description or IP permissions. +- `"SecurityGroupRuleDescription"`: The description for the ingress security group rules. + You must specify either a description or IP permissions. """ function update_security_group_rule_descriptions_ingress(; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/ecs.jl b/src/services/ecs.jl index 63128dacb9..73f329f106 100644 --- a/src/services/ecs.jl +++ b/src/services/ecs.jl @@ -244,7 +244,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys isn't specified. If schedulingStrategy is DAEMON then this isn't required. - `"enableECSManagedTags"`: Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see Tagging your Amazon ECS resources in - the Amazon Elastic Container Service Developer Guide. + the Amazon Elastic Container Service Developer Guide. When you use Amazon ECS managed tags, + you need to set the propagateTags request parameter. - `"enableExecuteCommand"`: Determines whether the execute command functionality is turned on for the service. If true, this enables execute command functionality on all containers in the service tasks. @@ -318,7 +319,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"propagateTags"`: Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the - TagResource API action. + TagResource API action. The default is NONE. - `"role"`: The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition doesn't use the @@ -729,6 +730,11 @@ count. You can't use a DELETE_IN_PROGRESS task definition revision to run new ta create new services. You also can't update an existing service to reference a DELETE_IN_PROGRESS task definition revision. A task definition revision will stay in DELETE_IN_PROGRESS status until all the associated tasks and services have been terminated. 
+When you delete all INACTIVE task definition revisions, the task definition name is not
+displayed in the console and not returned in the API. If a task definition revision is in
+the DELETE_IN_PROGRESS state, the task definition name is displayed in the console and
+returned in the API. The task definition name is retained by Amazon ECS and the revision is
+incremented the next time you create a task definition with that name. # Arguments - `task_definitions`: The family and revision (family:revision) or full Amazon Resource
diff --git a/src/services/efs.jl b/src/services/efs.jl index 53fd04f413..c11aa06434 100644
--- a/src/services/efs.jl
+++ b/src/services/efs.jl
@@ -16,10 +16,14 @@ exposed as the access point's root directory. Applications using the access poin access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points. If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near
-the limit of 1000 access points, you may experience a throttling response for these
+the limit of 1,000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit. This operation requires permissions for the elasticfilesystem:CreateAccessPoint
-action.
+action. Access points can be tagged on creation. If tags are specified in the creation
+action, IAM performs additional authorization on the elasticfilesystem:TagResource action
+to verify if users have permissions to create tags. Therefore, you must grant explicit
+permissions to use the elasticfilesystem:TagResource action. For more information, see
+Granting permissions to tag resources during creation. # Arguments - `client_token`: A string of up to 64 ASCII characters that Amazon EFS uses to ensure
@@ -110,8 +114,13 @@ the file system using the ThroughputMode parameter. After the file system is ful Amazon EFS sets its lifecycle state to available, at which point you can create one or more mount targets for the file system in your VPC. For more information, see CreateMountTarget. You mount your Amazon EFS file system on an EC2 instances in your VPC by using the mount
-target. For more information, see Amazon EFS: How it Works. This operation requires
-permissions for the elasticfilesystem:CreateFileSystem action.
+target. For more information, see Amazon EFS: How it Works. This operation requires
+permissions for the elasticfilesystem:CreateFileSystem action. File systems can be tagged
+on creation. If tags are specified in the creation action, IAM performs additional
+authorization on the elasticfilesystem:TagResource action to verify if users have
+permissions to create tags. Therefore, you must grant explicit permissions to use the
+elasticfilesystem:TagResource action. For more information, see Granting permissions to tag
+resources during creation. # Arguments - `creation_token`: A string of up to 64 ASCII characters. Amazon EFS uses this to ensure
diff --git a/src/services/emr.jl b/src/services/emr.jl index a77031a607..37613cf557 100644
--- a/src/services/emr.jl
+++ b/src/services/emr.jl
@@ -1414,6 +1414,49 @@ function list_studios( ) end
+"""
+ list_supported_instance_types(release_label)
+ list_supported_instance_types(release_label, params::Dict{String,<:Any})
+
+A list of the instance types that Amazon EMR supports. You can filter the list by Amazon
+Web Services Region and Amazon EMR release.
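For illustration only, a minimal sketch of paginating this new operation through AWS.jl's `@service` macro; the release label and the response field names ("SupportedInstanceTypes", "Type", "Marker") are assumptions, not taken from this patch.

```julia
using AWS
@service EMR

# Collect every supported instance type for one EMR release, following the
# pagination marker until the service stops returning one.
function supported_types(release::AbstractString)
    types = String[]
    params = Dict{String,Any}()
    while true
        resp = EMR.list_supported_instance_types(release, params)
        for t in get(resp, "SupportedInstanceTypes", [])
            push!(types, t["Type"])
        end
        marker = get(resp, "Marker", nothing)
        marker === nothing && return types
        params["Marker"] = marker
    end
end

supported_types("emr-6.10.0")
```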
+ +# Arguments +- `release_label`: The Amazon EMR release label determines the versions of open-source + application packages that Amazon EMR has installed on the cluster. Release labels are in + the format emr-x.x.x, where x.x.x is an Amazon EMR release number such as emr-6.10.0. For + more information about Amazon EMR releases and their included application versions and + features, see the Amazon EMR Release Guide . + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Marker"`: The pagination token that marks the next set of results to retrieve. +""" +function list_supported_instance_types( + ReleaseLabel; aws_config::AbstractAWSConfig=global_aws_config() +) + return emr( + "ListSupportedInstanceTypes", + Dict{String,Any}("ReleaseLabel" => ReleaseLabel); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_supported_instance_types( + ReleaseLabel, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr( + "ListSupportedInstanceTypes", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ReleaseLabel" => ReleaseLabel), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_cluster(cluster_id) modify_cluster(cluster_id, params::Dict{String,<:Any}) diff --git a/src/services/emr_serverless.jl b/src/services/emr_serverless.jl index 43b98f6cb6..4884b6284d 100644 --- a/src/services/emr_serverless.jl +++ b/src/services/emr_serverless.jl @@ -49,7 +49,7 @@ Creates an application. # Arguments - `client_token`: The client idempotency token of the application to create. Its value must be unique for each request. -- `release_label`: The EMR release associated with the application. +- `release_label`: The Amazon EMR release associated with the application. - `type`: The type of application you want to start, such as Spark or Hive. # Optional Parameters @@ -186,8 +186,12 @@ end get_dashboard_for_job_run(application_id, job_run_id) get_dashboard_for_job_run(application_id, job_run_id, params::Dict{String,<:Any}) -Returns a URL to access the job run dashboard. The generated URL is valid for one hour, -after which you must invoke the API again to generate a new URL. +Creates and returns a URL that you can use to access the application UIs for a job run. For +jobs in a running state, the application UI is a live user interface such as the Spark or +Tez web UI. For completed jobs, the application UI is a persistent application user +interface such as the Spark History Server or persistent Tez UI. The URL is valid for one +hour after you generate it. To access the application UI after that hour elapses, you must +invoke the API again to generate a new URL. # Arguments - `application_id`: The ID of the application. @@ -594,6 +598,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys This is cumulative across all workers at any given point in time during the lifespan of the application. No new resources will be created once any one of the defined limits is hit. - `"networkConfiguration"`: +- `"releaseLabel"`: The Amazon EMR release label for the application. You can change the + release label to use a different release of Amazon EMR. - `"workerTypeSpecifications"`: The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. 
Valid worker types include Driver and Executor for Spark diff --git a/src/services/entityresolution.jl b/src/services/entityresolution.jl new file mode 100644 index 0000000000..401d3b615e --- /dev/null +++ b/src/services/entityresolution.jl @@ -0,0 +1,654 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: entityresolution +using AWS.Compat +using AWS.UUIDs + +""" + create_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name) + create_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name, params::Dict{String,<:Any}) + +Creates a MatchingWorkflow object which stores the configuration of the data processing job +to be run. It is important to note that there should not be a pre-existing MatchingWorkflow +with the same name. To modify an existing workflow, utilize the UpdateMatchingWorkflow API. + +# Arguments +- `input_source_config`: A list of InputSource objects, which have the fields + InputSourceARN and SchemaName. +- `output_source_config`: A list of OutputSource objects, each of which contains fields + OutputS3Path, ApplyNormalization, and Output. +- `resolution_techniques`: An object which defines the resolutionType and the + ruleBasedProperties +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. +- `workflow_name`: The name of the workflow. There cannot be multiple + DataIntegrationWorkflows with the same name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the workflow. +- `"incrementalRunConfig"`: An object which defines an incremental run type and has only + incrementalRunType as a field. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows", + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + "workflowName" => workflowName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + "workflowName" => workflowName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_schema_mapping(schema_name) + create_schema_mapping(schema_name, params::Dict{String,<:Any}) + +Creates a schema mapping, which defines the schema of the input customer records table. The +SchemaMapping also provides Entity Resolution with some metadata about the table, such as +the attribute types of the columns and which columns to match on. 
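As a rough sketch of what a schema mapping request might look like from Julia (assuming the `@service` macro picks up the new entityresolution bindings): the schema name, field names, and attribute type strings below are illustrative placeholders, not values defined by this patch.

```julia
using AWS
@service EntityResolution

# Hypothetical schema mapping for a customer table.
params = Dict{String,Any}(
    "description" => "Customer records used for matching",
    "mappedInputFields" => [
        Dict("fieldName" => "customer_id", "type" => "UNIQUE_ID"),
        Dict("fieldName" => "email", "type" => "EMAIL_ADDRESS"),
        Dict("fieldName" => "full_name", "type" => "NAME"),
    ],
)
EntityResolution.create_schema_mapping("customers-schema", params)
```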
+ +# Arguments +- `schema_name`: The name of the schema. There cannot be multiple SchemaMappings with the + same name. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the schema. +- `"mappedInputFields"`: A list of MappedInputFields. Each MappedInputField corresponds to + a column the source data table, and contains column name plus additional information that + Entity Resolution uses for matching. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_schema_mapping( + schemaName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "POST", + "/schemas", + Dict{String,Any}("schemaName" => schemaName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_schema_mapping( + schemaName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/schemas", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("schemaName" => schemaName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_matching_workflow(workflow_name) + delete_matching_workflow(workflow_name, params::Dict{String,<:Any}) + +Deletes the MatchingWorkflow with a given name. This operation will succeed even if a +workflow with the given name does not exist. + +# Arguments +- `workflow_name`: The name of the workflow to be retrieved. + +""" +function delete_matching_workflow( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/matchingworkflows/$(workflowName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_matching_workflow( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/matchingworkflows/$(workflowName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_schema_mapping(schema_name) + delete_schema_mapping(schema_name, params::Dict{String,<:Any}) + +Deletes the SchemaMapping with a given name. This operation will succeed even if a schema +with the given name does not exist. This operation will fail if there is a +DataIntegrationWorkflow object that references the SchemaMapping in the workflow's +InputSourceConfig. + +# Arguments +- `schema_name`: The name of the schema to delete. + +""" +function delete_schema_mapping( + schemaName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/schemas/$(schemaName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_schema_mapping( + schemaName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/schemas/$(schemaName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_match_id(record, workflow_name) + get_match_id(record, workflow_name, params::Dict{String,<:Any}) + +Returns the corresponding Match ID of a customer record if the record has been processed. + +# Arguments +- `record`: The record to fetch the Match ID for. +- `workflow_name`: The name of the workflow. 
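A minimal usage sketch, assuming the `@service` macro loads these new bindings; the record keys should mirror the workflow's schema mapping, and the values, workflow name, and "matchId" response key are placeholders/assumptions.

```julia
using AWS
@service EntityResolution

# Look up the Match ID for one already-processed record.
record = Dict(
    "customer_id" => "12345",
    "email" => "jane.doe@example.com",
    "full_name" => "Jane Doe",
)
resp = EntityResolution.get_match_id(record, "customer-matching-workflow")
match_id = get(resp, "matchId", nothing)
```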
+ +""" +function get_match_id( + record, workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/matches", + Dict{String,Any}("record" => record); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_match_id( + record, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/matches", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("record" => record), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_matching_job(job_id, workflow_name) + get_matching_job(job_id, workflow_name, params::Dict{String,<:Any}) + +Gets the status, metrics, and errors (if there are any) that are associated with a job. + +# Arguments +- `job_id`: The ID of the job. +- `workflow_name`: The name of the workflow. + +""" +function get_matching_job( + jobId, workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs/$(jobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_matching_job( + jobId, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs/$(jobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_matching_workflow(workflow_name) + get_matching_workflow(workflow_name, params::Dict{String,<:Any}) + +Returns the MatchingWorkflow with a given name, if it exists. + +# Arguments +- `workflow_name`: The name of the workflow. + +""" +function get_matching_workflow( + workflowName; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_matching_workflow( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_schema_mapping(schema_name) + get_schema_mapping(schema_name, params::Dict{String,<:Any}) + +Returns the SchemaMapping of a given name. + +# Arguments +- `schema_name`: The name of the schema to be retrieved. + +""" +function get_schema_mapping(schemaName; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", + "/schemas/$(schemaName)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_schema_mapping( + schemaName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/schemas/$(schemaName)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_matching_jobs(workflow_name) + list_matching_jobs(workflow_name, params::Dict{String,<:Any}) + +Lists all jobs for a given workflow. + +# Arguments +- `workflow_name`: The name of the workflow to be retrieved. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. 
+- `"nextToken"`: The pagination token from the previous ListSchemaMappings API call. +""" +function list_matching_jobs(workflowName; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_matching_jobs( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/matchingworkflows/$(workflowName)/jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_matching_workflows() + list_matching_workflows(params::Dict{String,<:Any}) + +Returns a list of all the MatchingWorkflows that have been created for an AWS account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous ListSchemaMappings API call. +""" +function list_matching_workflows(; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/matchingworkflows"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_matching_workflows( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/matchingworkflows", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_schema_mappings() + list_schema_mappings(params::Dict{String,<:Any}) + +Returns a list of all the SchemaMappings that have been created for an AWS account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of objects returned per page. +- `"nextToken"`: The pagination token from the previous ListSchemaMappings API call. +""" +function list_schema_mappings(; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "GET", "/schemas"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_schema_mappings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", "/schemas", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Displays the tags associated with an AWS Entity Resolution resource. In Entity Resolution, +SchemaMapping, and MatchingWorkflow can be tagged. + +# Arguments +- `resource_arn`: The ARN of the resource for which you want to view tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_matching_job(workflow_name) + start_matching_job(workflow_name, params::Dict{String,<:Any}) + +Starts the MatchingJob of a workflow. The workflow must have previously been created using +the CreateMatchingWorkflow endpoint. 
+ +# Arguments +- `workflow_name`: The name of the matching job to be retrieved. + +""" +function start_matching_job(workflowName; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_matching_job( + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/matchingworkflows/$(workflowName)/jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns one or more tags (key-value pairs) to the specified AWS Entity Resolution resource. +Tags can help you organize and categorize your resources. You can also use them to scope +user permissions by granting a user permission to access or change only resources with +certain tag values. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be +tagged. Tags don't have any semantic meaning to AWS and are interpreted strictly as strings +of characters. You can use the TagResource action with a resource that already has tags. If +you specify a new tag key, this tag is appended to the list of tags associated with the +resource. If you specify a tag key that is already associated with the resource, the new +tag value that you specify replaces the previous value for that tag. + +# Arguments +- `resource_arn`: The ARN of the resource for which you want to view tags. +- `tags`: The tags used to organize, track, or control access for this resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return entityresolution( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes one or more tags from the specified AWS Entity Resolution resource. In Entity +Resolution, SchemaMapping, and MatchingWorkflow can be tagged. + +# Arguments +- `resource_arn`: The ARN of the resource for which you want to untag. +- `tag_keys`: The list of tag keys to remove from the resource. 
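A short sketch pairing TagResource and UntagResource on the same resource, assuming the `@service` macro loads these bindings; the ARN below is a placeholder for a workflow in your account.

```julia
using AWS
@service EntityResolution

arn = "arn:aws:entityresolution:us-east-1:111122223333:matchingworkflow/customer-matching-workflow"
EntityResolution.tag_resource(arn, Dict("team" => "data-platform"))  # add a tag
EntityResolution.untag_resource(arn, ["team"])                       # remove it again
```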
+ +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return entityresolution( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name) + update_matching_workflow(input_source_config, output_source_config, resolution_techniques, role_arn, workflow_name, params::Dict{String,<:Any}) + +Updates an existing MatchingWorkflow. This method is identical to CreateMatchingWorkflow, +except it uses an HTTP PUT request instead of a POST request, and the MatchingWorkflow must +already exist for the method to succeed. + +# Arguments +- `input_source_config`: A list of InputSource objects, which have the fields + InputSourceARN and SchemaName. +- `output_source_config`: A list of OutputSource objects, each of which contains fields + OutputS3Path, ApplyNormalization, and Output. +- `resolution_techniques`: An object which defines the resolutionType and the + ruleBasedProperties +- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. +- `workflow_name`: The name of the workflow to be retrieved. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the workflow. +- `"incrementalRunConfig"`: An object which defines an incremental run type and has only + incrementalRunType as a field. 
+""" +function update_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/matchingworkflows/$(workflowName)", + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_matching_workflow( + inputSourceConfig, + outputSourceConfig, + resolutionTechniques, + roleArn, + workflowName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return entityresolution( + "PUT", + "/matchingworkflows/$(workflowName)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputSourceConfig" => inputSourceConfig, + "outputSourceConfig" => outputSourceConfig, + "resolutionTechniques" => resolutionTechniques, + "roleArn" => roleArn, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/fsx.jl b/src/services/fsx.jl index 6a2ee7dd4c..ce906e7f35 100644 --- a/src/services/fsx.jl +++ b/src/services/fsx.jl @@ -2151,11 +2151,11 @@ AutomaticBackupRetentionDays DailyAutomaticBackupStartTime SelfManagedActiveDirectoryConfiguration StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy AutomaticBackupRetentionDays -DailyAutomaticBackupStartTime DataCompressionType LustreRootSquashConfiguration -StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can -update the following properties: AddRouteTableIds AutomaticBackupRetentionDays -DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword -RemoveRouteTableIds StorageCapacity ThroughputCapacity +DailyAutomaticBackupStartTime DataCompressionType LogConfiguration +LustreRootSquashConfiguration StorageCapacity WeeklyMaintenanceStartTime For FSx +for ONTAP file systems, you can update the following properties: AddRouteTableIds +AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration + FsxAdminPassword RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the following properties: AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration diff --git a/src/services/gamelift.jl b/src/services/gamelift.jl index eeefa6cfb6..d18e155878 100644 --- a/src/services/gamelift.jl +++ b/src/services/gamelift.jl @@ -86,9 +86,9 @@ connection information that players can use to connect to the game server. To c server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or -player information. Filter options may be included to further restrict how a game server is -chosen, such as only allowing game servers on ACTIVE instances to be claimed. When a game -server is successfully claimed, connection information is returned. A claimed game server's +player information. Add filter options to further restrict how a game server is chosen, +such as only allowing game servers on ACTIVE instances to be claimed. 
When a game server is +successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server @@ -96,8 +96,8 @@ reverts to unclaimed status and is available to be claimed by another request. T time period is a fixed value and is not configurable. If you try to claim a specific game server, this request will fail in the following cases: If the game server utilization status is UTILIZED. If the game server claim status is CLAIMED. If the game server is -running on an instance in DRAINING status and provided filter option does not allow placing -on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide +running on an instance in DRAINING status and the provided filter option does not allow +placing on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide # Arguments - `game_server_group_name`: A unique identifier for the game server group where the game @@ -878,8 +878,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys accept a proposed match, if acceptance is required. - `"AdditionalPlayerCount"`: The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single - 12-person team, and the additional player count is set to 2, only 10 players are selected - for the match. This parameter is not used if FlexMatchMode is set to STANDALONE. + 10-person team, and the additional player count is set to 2, 10 players will be selected + for the match and 2 more player slots will be open for future players. This parameter is + not used if FlexMatchMode is set to STANDALONE. - `"BackfillMode"`: The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have Amazon GameLift @@ -5413,8 +5414,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys accept a proposed match, if acceptance is required. - `"AdditionalPlayerCount"`: The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single - 12-person team, and the additional player count is set to 2, only 10 players are selected - for the match. This parameter is not used if FlexMatchMode is set to STANDALONE. + 10-person team, and the additional player count is set to 2, 10 players will be selected + for the match and 2 more player slots will be open for future players. This parameter is + not used if FlexMatchMode is set to STANDALONE. - `"BackfillMode"`: The method that is used to backfill game sessions created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have GameLift create a diff --git a/src/services/glue.jl b/src/services/glue.jl index 733c7cd44f..3c9c8b5558 100644 --- a/src/services/glue.jl +++ b/src/services/glue.jl @@ -1407,18 +1407,32 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). 
- `"WorkerType"`: The type of predefined worker that is allocated when a job runs. Accepts
- a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray
- jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a
- 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1
- DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend
- this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to
- 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We
- recommend this worker type for memory-intensive jobs. For the G.025X worker type, each
- worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per
- worker. We recommend this worker type for low volume streaming jobs. This worker type is
- only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker
- maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers
- based on the autoscaler.
+ a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray
+ jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with
+ 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this
+ worker type for workloads such as data transforms, joins, and queries, to offer a scalable
+ and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2
+ DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1
+ executor per worker. We recommend this worker type for workloads such as data transforms,
+ joins, and queries, to offer a scalable and cost effective way to run most jobs. For the
+ G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk
+ (approximately 235GB free), and provides 1 executor per worker. We recommend this worker
+ type for jobs whose workloads contain your most demanding transforms, aggregations, joins,
+ and queries. This worker type is available only for Glue version 3.0 or later Spark ETL
+ jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia),
+ US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
+ Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the
+ G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk
+ (approximately 487GB free), and provides 1 executor per worker. We recommend this worker
+ type for jobs whose workloads contain your most demanding transforms, aggregations, joins,
+ and queries. This worker type is available only for Glue version 3.0 or later Spark ETL
+ jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For
+ the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB
+ disk (approximately 34GB free), and provides 1 executor per worker. We recommend this
+ worker type for low volume streaming jobs. This worker type is only available for Glue
+ version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU
+ (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8
+ Ray workers based on the autoscaler.
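A minimal sketch of creating a Glue 4.0 Spark job on one of these worker types, using the params form of create_job; the script location, role ARN, and job name are placeholders, not values from this patch.

```julia
using AWS
@service Glue

command = Dict{String,Any}(
    "Name" => "glueetl",
    "ScriptLocation" => "s3://my-example-bucket/scripts/etl.py",
    "PythonVersion" => "3",
)
params = Dict{String,Any}(
    "GlueVersion" => "4.0",
    "WorkerType" => "G.2X",       # one of the worker types described above
    "NumberOfWorkers" => 10,
)
Glue.create_job(command, "nightly-etl", "arn:aws:iam::111122223333:role/GlueJobRole", params)
```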
""" function create_job(Command, Name, Role; aws_config::AbstractAWSConfig=global_aws_config()) return glue( @@ -1914,17 +1928,30 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Timeout"`: The number of minutes before session times out. Default for Spark ETL jobs is 48 hours (2880 minutes), the maximum session lifetime for this job type. Consult the documentation for other job types. -- `"WorkerType"`: The type of predefined worker that is allocated to use for the session. - Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each - worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For - the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and - provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. - For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), - and provides 1 executor per worker. We recommend this worker type for memory-intensive - jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, - 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low - volume streaming jobs. This worker type is only available for Glue version 3.0 streaming - jobs. +- `"WorkerType"`: The type of predefined worker that is allocated when a job runs. Accepts + a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray + notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) + with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend + this worker type for workloads such as data transforms, joins, and queries, to offers a + scalable and cost effective way to run most jobs. For the G.2X worker type, each worker + maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and + provides 1 executor per worker. We recommend this worker type for workloads such as data + transforms, joins, and queries, to offers a scalable and cost effective way to run most + jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) + with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We + recommend this worker type for jobs whose workloads contain your most demanding transforms, + aggregations, joins, and queries. This worker type is available only for Glue version 3.0 + or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US + East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia + Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe + (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of + memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We + recommend this worker type for jobs whose workloads contain your most demanding transforms, + aggregations, joins, and queries. This worker type is available only for Glue version 3.0 + or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X + worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of + memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based + on the autoscaler. 
""" function create_session( Command, Id, Role; aws_config::AbstractAWSConfig=global_aws_config() @@ -1973,6 +2000,8 @@ Creates a new table definition in the Data Catalog. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CatalogId"`: The ID of the Data Catalog in which to create the Table. If none is supplied, the Amazon Web Services account ID is used by default. +- `"OpenTableFormatInput"`: Specifies an OpenTableFormatInput structure when creating an + open format table. - `"PartitionIndexes"`: A list of partition indexes, PartitionIndex structures, to create in the table. - `"TransactionId"`: The ID of the transaction. @@ -7074,18 +7103,32 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the timeout value set in the parent job. Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours). - `"WorkerType"`: The type of predefined worker that is allocated when a job runs. Accepts - a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray - jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a - 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 - DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to - 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We - recommend this worker type for memory-intensive jobs. For the G.025X worker type, each - worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per - worker. We recommend this worker type for low volume streaming jobs. This worker type is - only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker - maps to 2 DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one - per vCPU) based on the autoscaler. + a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray + jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with + 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this + worker type for workloads such as data transforms, joins, and queries, to offers a scalable + and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 + DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 + executor per worker. We recommend this worker type for workloads such as data transforms, + joins, and queries, to offers a scalable and cost effective way to run most jobs. For the + G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk + (approximately 235GB free), and provides 1 executor per worker. We recommend this worker + type for jobs whose workloads contain your most demanding transforms, aggregations, joins, + and queries. This worker type is available only for Glue version 3.0 or later Spark ETL + jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), + US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), + Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). 
For the + G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk + (approximately 487GB free), and provides 1 executor per worker. We recommend this worker + type for jobs whose workloads contain your most demanding transforms, aggregations, joins, + and queries. This worker type is available only for Glue version 3.0 or later Spark ETL + jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For + the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB + disk (approximately 34GB free), and provides 1 executor per worker. We recommend this + worker type for low volume streaming jobs. This worker type is only available for Glue + version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU + (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 + Ray workers based on the autoscaler. """ function start_job_run(JobName; aws_config::AbstractAWSConfig=global_aws_config()) return glue( diff --git a/src/services/grafana.jl b/src/services/grafana.jl index 1c08b7afc2..725db1cb92 100644 --- a/src/services/grafana.jl +++ b/src/services/grafana.jl @@ -78,8 +78,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"configuration"`: The configuration string for the workspace that you create. For more information about the format and configuration options available, see Working in your Grafana workspace. -- `"grafanaVersion"`: Specifies the version of Grafana to support in the new workspace. - Supported values are 8.4 and 9.4. +- `"grafanaVersion"`: Specifies the version of Grafana to support in the new workspace. To + get a list of supported version, use the ListVersions operation. - `"networkAccessControl"`: Configuration for network access to your workspace. When this is configured, only listed IP addresses and VPC endpoints will be able to access your workspace. Standard Grafana authentication and authorization will still be required. If @@ -92,7 +92,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to be used for this workspace. - `"tags"`: The list of tags associated with the workspace. - `"vpcConfiguration"`: The configuration settings for an Amazon VPC that contains data - sources for your Grafana workspace to connect to. + sources for your Grafana workspace to connect to. Connecting to a private VPC is not yet + available in the Asia Pacific (Seoul) Region (ap-northeast-2). - `"workspaceDataSources"`: This parameter is for internal use only, and should not be used. - `"workspaceDescription"`: A description for the workspace. This is used only to help you identify this workspace. Pattern: ^[p{L}p{Z}p{N}p{P}]{0,2048} @@ -504,6 +505,34 @@ function list_tags_for_resource( ) end +""" + list_versions() + list_versions(params::Dict{String,<:Any}) + +Lists available versions of Grafana. These are available when calling CreateWorkspace. +Optionally, include a workspace to list the versions to which it can be upgraded. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to include in the response. +- `"nextToken"`: The token to use when requesting the next set of results. You receive this + token from a previous ListVersions operation. +- `"workspace-id"`: The ID of the workspace to list the available upgrade versions. 
If not + included, lists all versions of Grafana that are supported for CreateWorkspace. +""" +function list_versions(; aws_config::AbstractAWSConfig=global_aws_config()) + return grafana( + "GET", "/versions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_versions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return grafana( + "GET", "/versions", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_workspaces() list_workspaces(params::Dict{String,<:Any}) @@ -809,6 +838,12 @@ Updates the configuration string for the given workspace about the format and configuration options available, see Working in your Grafana workspace. - `workspace_id`: The ID of the workspace to update. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"grafanaVersion"`: Specifies the version of Grafana to support in the new workspace. Can + only be used to upgrade (for example, from 8.4 to 9.4), not downgrade (for example, from + 9.4 to 8.4). To know what versions are available to upgrade to for a specific workspace, + see the ListVersions operation. """ function update_workspace_configuration( configuration, workspaceId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/guardduty.jl b/src/services/guardduty.jl index a555bbb3cf..72baa6e1c6 100644 --- a/src/services/guardduty.jl +++ b/src/services/guardduty.jl @@ -392,12 +392,17 @@ end Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated -member accounts either by invitation or through an organization. When using Create Members -as an organizations delegated administrator this action will enable GuardDuty in the added -member accounts, with the exception of the organization delegated administrator account, -which must enable GuardDuty prior to being added as a member. If you are adding accounts by -invitation, use this action after GuardDuty has bee enabled in potential member accounts -and before using InviteMembers. +member accounts either by invitation or through an organization. As a delegated +administrator, using CreateMembers will enable GuardDuty in the added member accounts, with +the exception of the organization delegated administrator account. A delegated +administrator must enable GuardDuty prior to being added as a member. If you are adding +accounts by invitation, before using InviteMembers, use CreateMembers after GuardDuty has +been enabled in potential member accounts. If you disassociate a member from a GuardDuty +delegated administrator, the member account details obtained from this API, including the +associated email addresses, will be retained. This is done so that the delegated +administrator can invoke the InviteMembers API without the need to invoke the CreateMembers +API again. To remove the details associated with a member account, the delegated +administrator must invoke the DeleteMembers API. # Arguments - `account_details`: A list of account ID and email address pairs of the accounts that you @@ -1084,7 +1089,12 @@ end disassociate_from_administrator_account(detector_id) disassociate_from_administrator_account(detector_id, params::Dict{String,<:Any}) -Disassociates the current GuardDuty member account from its administrator account. With +Disassociates the current GuardDuty member account from its administrator account. 
When you +disassociate an invited member from a GuardDuty delegated administrator, the member account +details obtained from the CreateMembers API, including the associated email addresses, are +retained. This is done so that the delegated administrator can invoke the InviteMembers API +without the need to invoke the CreateMembers API again. To remove the details associated +with a member account, the delegated administrator must invoke the DeleteMembers API. With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty in a member account. @@ -1120,7 +1130,12 @@ end disassociate_from_master_account(detector_id) disassociate_from_master_account(detector_id, params::Dict{String,<:Any}) -Disassociates the current GuardDuty member account from its administrator account. +Disassociates the current GuardDuty member account from its administrator account. When you +disassociate an invited member from a GuardDuty delegated administrator, the member account +details obtained from the CreateMembers API, including the associated email addresses, are +retained. This is done so that the delegated administrator can invoke the InviteMembers API +without the need to invoke the CreateMembers API again. To remove the details associated +with a member account, the delegated administrator must invoke the DeleteMembers API. # Arguments - `detector_id`: The unique ID of the detector of the GuardDuty member account. @@ -1154,10 +1169,16 @@ end disassociate_members(account_ids, detector_id) disassociate_members(account_ids, detector_id, params::Dict{String,<:Any}) -Disassociates GuardDuty member accounts (to the current administrator account) specified by -the account IDs. With autoEnableOrganizationMembers configuration for your organization set -to ALL, you'll receive an error if you attempt to disassociate a member account before -removing them from your Amazon Web Services organization. +Disassociates GuardDuty member accounts (from the current administrator account) specified +by the account IDs. When you disassociate an invited member from a GuardDuty delegated +administrator, the member account details obtained from the CreateMembers API, including +the associated email addresses, are retained. This is done so that the delegated +administrator can invoke the InviteMembers API without the need to invoke the CreateMembers +API again. To remove the details associated with a member account, the delegated +administrator must invoke the DeleteMembers API. With autoEnableOrganizationMembers +configuration for your organization set to ALL, you'll receive an error if you attempt to +disassociate a member account before removing them from your Amazon Web Services +organization. # Arguments - `account_ids`: A list of account IDs of the GuardDuty member accounts that you want to @@ -1831,10 +1852,21 @@ end invite_members(account_ids, detector_id) invite_members(account_ids, detector_id, params::Dict{String,<:Any}) -Invites other Amazon Web Services accounts (created as members of the current Amazon Web -Services account by CreateMembers) to enable GuardDuty, and allow the current Amazon Web -Services account to view and manage these accounts' findings on their behalf as the -GuardDuty administrator account. +Invites Amazon Web Services accounts to become members of an organization administered by +the Amazon Web Services account that invokes this API. 
If you are using Amazon Web Services
+Organizations to manage your GuardDuty environment, this step is not needed. For more
+information, see Managing accounts with Amazon Web Services Organizations. To invite Amazon
+Web Services accounts, the first step is to ensure that GuardDuty has been enabled in the
+potential member accounts. You can now invoke this API to add accounts by invitation. The
+invited accounts can either accept or decline the invitation from their GuardDuty accounts.
+Each invited Amazon Web Services account can choose to accept the invitation from only one
+Amazon Web Services account. For more information, see Managing GuardDuty accounts by
+invitation. After the invite has been accepted and you choose to disassociate a member
+account (by using DisassociateMembers) from your account, the details of the member account
+obtained by invoking CreateMembers, including the associated email addresses, will be
+retained. This is done so that you can invoke InviteMembers without the need to invoke
+CreateMembers again. To remove the details associated with a member account, you must also
+invoke DeleteMembers.
 
 # Arguments
 - `account_ids`: A list of account IDs of the accounts that you want to invite to GuardDuty
@@ -2257,8 +2289,8 @@ end
     list_tags_for_resource(resource_arn, params::Dict{String,<:Any})
 
 Lists tags for a resource. Tagging is currently supported for detectors, finding filters,
-IP sets, and threat intel sets, with a limit of 50 tags per resource. When invoked, this
-operation returns all assigned tags for a given resource.
+IP sets, threat intel sets, and publishing destinations, with a limit of 50 tags per resource.
+When invoked, this operation returns all assigned tags for a given resource.
 
 # Arguments
 - `resource_arn`: The Amazon Resource Name (ARN) for the given GuardDuty resource.
diff --git a/src/services/healthlake.jl b/src/services/healthlake.jl
index dd349ed7cc..4546c348b3 100644
--- a/src/services/healthlake.jl
+++ b/src/services/healthlake.jl
@@ -8,24 +8,24 @@ using AWS.UUIDs
     create_fhirdatastore(datastore_type_version)
     create_fhirdatastore(datastore_type_version, params::Dict{String,<:Any})
 
-Creates a Data Store that can ingest and export FHIR formatted data.
+Creates a data store that can ingest and export FHIR formatted data.
 
 # Arguments
-- `datastore_type_version`: The FHIR version of the Data Store. The only supported version
+- `datastore_type_version`: The FHIR version of the data store. The only supported version
   is R4.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"ClientToken"`: Optional user provided token used for ensuring idempotency.
-- `"DatastoreName"`: The user generated name for the Data Store.
+- `"DatastoreName"`: The user generated name for the data store.
 - `"IdentityProviderConfiguration"`: The configuration of the identity provider that you
-  want to use for your Data Store.
-- `"PreloadDataConfig"`: Optional parameter to preload data upon creation of the Data
-  Store. Currently, the only supported preloaded data is synthetic data generated from
+  want to use for your data store.
+- `"PreloadDataConfig"`: Optional parameter to preload data upon creation of the data
+  store. Currently, the only supported preloaded data is synthetic data generated from
   Synthea.
 - `"SseConfiguration"`: The server-side encryption key configuration for a customer
-  provided encryption key specified for creating a Data Store. 
-- `"Tags"`: Resource tags that are applied to a Data Store when it is created. + provided encryption key specified for creating a data store. +- `"Tags"`: Resource tags that are applied to a data store when it is created. """ function create_fhirdatastore( DatastoreTypeVersion; aws_config::AbstractAWSConfig=global_aws_config() @@ -65,10 +65,10 @@ end delete_fhirdatastore(datastore_id) delete_fhirdatastore(datastore_id, params::Dict{String,<:Any}) -Deletes a Data Store. +Deletes a data store. # Arguments -- `datastore_id`: The AWS-generated ID for the Data Store to be deleted. +- `datastore_id`: The AWS-generated ID for the data store to be deleted. """ function delete_fhirdatastore( @@ -100,12 +100,12 @@ end describe_fhirdatastore(datastore_id) describe_fhirdatastore(datastore_id, params::Dict{String,<:Any}) -Gets the properties associated with the FHIR Data Store, including the Data Store ID, Data -Store ARN, Data Store name, Data Store status, created at, Data Store type version, and -Data Store endpoint. +Gets the properties associated with the FHIR data store, including the data store ID, data +store ARN, data store name, data store status, when the data store was created, data store +type version, and the data store's endpoint. # Arguments -- `datastore_id`: The AWS-generated Data Store ID. +- `datastore_id`: The AWS-generated data store ID. """ function describe_fhirdatastore( @@ -141,7 +141,7 @@ Displays the properties of a FHIR export job, including the ID, ARN, name, and t of the job. # Arguments -- `datastore_id`: The AWS generated ID for the Data Store from which files are being +- `datastore_id`: The AWS generated ID for the data store from which files are being exported from for an export job. - `job_id`: The AWS generated ID for an export job. @@ -184,7 +184,7 @@ Displays the properties of a FHIR import job, including the ID, ARN, name, and t of the job. # Arguments -- `datastore_id`: The AWS-generated ID of the Data Store. +- `datastore_id`: The AWS-generated ID of the data store. - `job_id`: The AWS-generated job ID. """ @@ -222,15 +222,15 @@ end list_fhirdatastores() list_fhirdatastores(params::Dict{String,<:Any}) -Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store +Lists all FHIR data stores that are in the user’s account, regardless of data store status. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filter"`: Lists all filters associated with a FHIR Data Store request. -- `"MaxResults"`: The maximum number of Data Stores returned in a single page of a +- `"Filter"`: Lists all filters associated with a FHIR data store request. +- `"MaxResults"`: The maximum number of data stores returned in a single page of a ListFHIRDatastoresRequest call. -- `"NextToken"`: Fetches the next page of Data Stores when results are paginated. +- `"NextToken"`: Fetches the next page of data stores when results are paginated. """ function list_fhirdatastores(; aws_config::AbstractAWSConfig=global_aws_config()) return healthlake( @@ -253,7 +253,7 @@ end # Arguments - `datastore_id`: This parameter limits the response to the export job with the specified - Data Store ID. + data store ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -303,7 +303,7 @@ end # Arguments - `datastore_id`: This parameter limits the response to the import job with the specified - Data Store ID. + data store ID. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -349,10 +349,10 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) - Returns a list of all existing tags associated with a Data Store. + Returns a list of all existing tags associated with a data store. # Arguments -- `resource_arn`: The Amazon Resource Name(ARN) of the Data Store for which tags are being +- `resource_arn`: The Amazon Resource Name(ARN) of the data store for which tags are being added. """ @@ -390,7 +390,7 @@ Begins a FHIR export job. # Arguments - `client_token`: An optional user provided token used for ensuring idempotency. - `data_access_role_arn`: The Amazon Resource Name used during the initiation of the job. -- `datastore_id`: The AWS generated ID for the Data Store from which files are being +- `datastore_id`: The AWS generated ID for the data store from which files are being exported for an export job. - `output_data_config`: The output data configuration that was supplied when the export job was created. @@ -453,9 +453,9 @@ Begins a FHIR Import job. # Arguments - `client_token`: Optional user provided token used for ensuring idempotency. -- `data_access_role_arn`: The Amazon Resource Name (ARN) that gives Amazon HealthLake - access permission. -- `datastore_id`: The AWS-generated Data Store ID. +- `data_access_role_arn`: The Amazon Resource Name (ARN) that gives AWS HealthLake access + permission. +- `datastore_id`: The AWS-generated data store ID. - `input_data_config`: The input properties of the FHIR Import job in the StartFHIRImport job request. - `job_output_data_config`: @@ -518,12 +518,12 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) - Adds a user specified key and value tag to a Data Store. + Adds a user specified key and value tag to a data store. # Arguments -- `resource_arn`: The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the - Data Store which tags are being added to. -- `tags`: The user specified key and value pair tags being added to a Data Store. +- `resource_arn`: The Amazon Resource Name(ARN)that gives AWS HealthLake access to the + data store which tags are being added to. +- `tags`: The user specified key and value pair tags being added to a data store. """ function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) @@ -558,12 +558,12 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) - Removes tags from a Data Store. + Removes tags from a data store. # Arguments -- `resource_arn`: \"The Amazon Resource Name(ARN) of the Data Store for which tags are - being removed -- `tag_keys`: The keys for the tags to be removed from the Healthlake Data Store. +- `resource_arn`: The Amazon Resource Name(ARN) of the data store for which tags are being + removed. +- `tag_keys`: The keys for the tags to be removed from the HealthLake data store. """ function untag_resource( diff --git a/src/services/iam.jl b/src/services/iam.jl index 0118ff1d9e..1499285b6d 100644 --- a/src/services/iam.jl +++ b/src/services/iam.jl @@ -66,9 +66,9 @@ and then add a different role to an instance profile. You must then wait for the appear across all of Amazon Web Services because of eventual consistency. 
To force the change, you must disassociate the instance profile and then associate the instance profile, or you can stop your instance and then restart it. The caller of this operation must be -granted the PassRole permission on the IAM role by a permissions policy. For more -information about roles, see Working with roles. For more information about instance -profiles, see About instance profiles. +granted the PassRole permission on the IAM role by a permissions policy. For more +information about roles, see IAM roles in the IAM User Guide. For more information about +instance profiles, see Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to update. This parameter @@ -165,7 +165,7 @@ end Attaches the specified managed policy to the specified IAM group. You use this operation to attach a managed policy to a group. To embed an inline policy in a group, use -PutGroupPolicy. As a best practice, you can validate your IAM policies. To learn more, see +PutGroupPolicy . As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide. For more information about policies, see Managed policies and inline policies in the IAM User Guide. @@ -216,9 +216,9 @@ end Attaches the specified managed policy to the specified IAM role. When you attach a managed policy to a role, the managed policy becomes part of the role's permission (access) policy. You cannot use a managed policy as the role's trust policy. The role's trust policy is -created at the same time as the role, using CreateRole. You can update a role's trust -policy using UpdateAssumeRolePolicy. Use this operation to attach a managed policy to a -role. To embed an inline policy in a role, use PutRolePolicy. For more information about +created at the same time as the role, using CreateRole . You can update a role's trust +policy using UpdateAssumerolePolicy . Use this operation to attach a managed policy to a +role. To embed an inline policy in a role, use PutRolePolicy . For more information about policies, see Managed policies and inline policies in the IAM User Guide. As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide. @@ -268,8 +268,8 @@ end attach_user_policy(policy_arn, user_name, params::Dict{String,<:Any}) Attaches the specified managed policy to the specified user. You use this operation to -attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy. -As a best practice, you can validate your IAM policies. To learn more, see Validating IAM +attach a managed policy to a user. To embed an inline policy in a user, use PutUserPolicy +. As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide. For more information about policies, see Managed policies and inline policies in the IAM User Guide. @@ -626,13 +626,13 @@ OIDC provider A list of tags that are attached to the specified IAM OIDC provi list of thumbprints of one or more server certificates that the IdP uses You get all of this information from the OIDC IdP you want to use to access Amazon Web Services. Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our -library of trusted certificate authorities (CAs) instead of using a certificate thumbprint -to verify your IdP server certificate. 
These OIDC IdPs include Google, Auth0, and those -that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, -your legacy thumbprint remains in your configuration, but is no longer used for validation. - The trust for the OIDC provider is derived from the IAM provider that this operation -creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation -to highly privileged users. +library of trusted root certificate authorities (CAs) instead of using a certificate +thumbprint to verify your IdP server certificate. These OIDC IdPs include Auth0, GitHub, +Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. +In these cases, your legacy thumbprint remains in your configuration, but is no longer used +for validation. The trust for the OIDC provider is derived from the IAM provider that +this operation creates. Therefore, it is best to limit access to the +CreateOpenIDConnectProvider operation to highly privileged users. # Arguments - `thumbprint_list`: A list of server certificate thumbprints for the OpenID Connect (OIDC) @@ -856,9 +856,9 @@ end create_role(assume_role_policy_document, role_name) create_role(assume_role_policy_document, role_name, params::Dict{String,<:Any}) -Creates a new role for your Amazon Web Services account. For more information about roles, -see IAM roles. For information about quotas for role names and the number of roles you can -create, see IAM and STS quotas in the IAM User Guide. +Creates a new role for your Amazon Web Services account. For more information about roles, +see IAM roles in the IAM User Guide. For information about quotas for role names and the +number of roles you can create, see IAM and STS quotas in the IAM User Guide. # Arguments - `assume_role_policy_document`: The trust relationship policy document that grants an @@ -1503,7 +1503,7 @@ Deletes the specified instance profile. The instance profile must not have an as role. Make sure that you do not have any Amazon EC2 instances running with the instance profile you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance. For more -information about instance profiles, see About instance profiles. +information about instance profiles, see Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to delete. This parameter @@ -3137,7 +3137,7 @@ end Retrieves information about the specified instance profile, including the instance profile's path, GUID, ARN, and role. For more information about instance profiles, see -About instance profiles in the IAM User Guide. +Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to get information about. This @@ -3219,6 +3219,43 @@ function get_login_profile( ) end +""" + get_mfadevice(serial_number) + get_mfadevice(serial_number, params::Dict{String,<:Any}) + +Retrieves information about an MFA device for a specified user. + +# Arguments +- `serial_number`: Serial number that uniquely identifies the MFA device. For this API, we + only accept FIDO security key ARNs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"UserName"`: The friendly name identifying the user. 
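A minimal usage sketch for the GetMFADevice wrapper whose methods follow; the serial-number ARN and user name are hypothetical placeholders, and the wrapper is assumed to be in scope (for example via AWS.jl's `@service` macro):

using AWS
# Hypothetical serial number; per the docstring above, only FIDO security key ARNs are accepted.
serial = "arn:aws:iam::111122223333:mfa/example-fido-key"
resp = get_mfadevice(serial)                                       # uses the default global AWS config
resp = get_mfadevice(serial, Dict("UserName" => "example-user"))   # scope the lookup to one user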
+""" +function get_mfadevice(SerialNumber; aws_config::AbstractAWSConfig=global_aws_config()) + return iam( + "GetMFADevice", + Dict{String,Any}("SerialNumber" => SerialNumber); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_mfadevice( + SerialNumber, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return iam( + "GetMFADevice", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SerialNumber" => SerialNumber), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_open_idconnect_provider(open_idconnect_provider_arn) get_open_idconnect_provider(open_idconnect_provider_arn, params::Dict{String,<:Any}) @@ -3426,11 +3463,11 @@ end Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information -about roles, see Working with roles. Policies returned by this operation are URL-encoded -compliant with RFC 3986. You can use a URL decoding method to convert the policy back to -plain JSON text. For example, if you use Java, you can use the decode method of the -java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar -functionality. +about roles, see IAM roles in the IAM User Guide. Policies returned by this operation are +URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the +policy back to plain JSON text. For example, if you use Java, you can use the decode method +of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide +similar functionality. # Arguments - `role_name`: The name of the IAM role to get information about. This parameter allows @@ -3474,8 +3511,8 @@ Java SDK. Other languages and SDKs provide similar functionality. An IAM role c have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document. For more information about policies, see -Managed policies and inline policies in the IAM User Guide. For more information about -roles, see Using roles to delegate permissions and federate identities. +Managed policies and inline policies in the IAM User Guide. For more information about +roles, see IAM roles in the IAM User Guide. # Arguments - `policy_name`: The name of the policy document to get. This parameter allows (through its @@ -4458,12 +4495,12 @@ end list_instance_profiles(params::Dict{String,<:Any}) Lists the instance profiles that have the specified path prefix. If there are none, the -operation returns an empty list. For more information about instance profiles, see About -instance profiles. IAM resource-listing operations return a subset of the available -attributes for the resource. For example, this operation does not return tags, even though -they are an attribute of the returned object. To view all of the information for an -instance profile, see GetInstanceProfile. You can paginate the results using the MaxItems -and Marker parameters. +operation returns an empty list. For more information about instance profiles, see Using +instance profiles in the IAM User Guide. IAM resource-listing operations return a subset +of the available attributes for the resource. For example, this operation does not return +tags, even though they are an attribute of the returned object. 
To view all of the +information for an instance profile, see GetInstanceProfile. You can paginate the results +using the MaxItems and Marker parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -4508,8 +4545,8 @@ end Lists the instance profiles that have the specified associated IAM role. If there are none, the operation returns an empty list. For more information about instance profiles, go to -About instance profiles. You can paginate the results using the MaxItems and Marker -parameters. +Using instance profiles in the IAM User Guide. You can paginate the results using the +MaxItems and Marker parameters. # Arguments - `role_name`: The name of the role to list instance profiles for. This parameter allows @@ -5064,11 +5101,12 @@ end list_roles(params::Dict{String,<:Any}) Lists the IAM roles that have the specified path prefix. If there are none, the operation -returns an empty list. For more information about roles, see Working with roles. IAM -resource-listing operations return a subset of the available attributes for the resource. -For example, this operation does not return tags, even though they are an attribute of the -returned object. To view all of the information for a role, see GetRole. You can paginate -the results using the MaxItems and Marker parameters. +returns an empty list. For more information about roles, see IAM roles in the IAM User +Guide. IAM resource-listing operations return a subset of the available attributes for the +resource. This operation does not return the following attributes, even though they are an +attribute of the returned object: PermissionsBoundary RoleLastUsed Tags To view all +of the information for a role, see GetRole. You can paginate the results using the +MaxItems and Marker parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5525,10 +5563,10 @@ end Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list. IAM resource-listing operations return a subset of the -available attributes for the resource. For example, this operation does not return tags, -even though they are an attribute of the returned object. To view all of the information -for a user, see GetUser. You can paginate the results using the MaxItems and Marker -parameters. +available attributes for the resource. This operation does not return the following +attributes, even though they are an attribute of the returned object: PermissionsBoundary + Tags To view all of the information for a user, see GetUser. You can paginate the +results using the MaxItems and Marker parameters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5610,10 +5648,10 @@ end Adds or updates an inline policy document that is embedded in the specified IAM group. A user can also have managed policies attached to it. To attach a managed policy to a group, -use AttachGroupPolicy. To create a new managed policy, use CreatePolicy. For information -about policies, see Managed policies and inline policies in the IAM User Guide. For -information about the maximum number of inline policies that you can embed in a group, see -IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you +use AttachGroupPolicy . 
To create a new managed policy, use CreatePolicy . For +information about policies, see Managed policies and inline policies in the IAM User Guide. +For information about the maximum number of inline policies that you can embed in a group, +see IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you should use POST rather than GET when calling PutGroupPolicy. For general information about using the Query API with IAM, see Making query requests in the IAM User Guide. @@ -5625,7 +5663,7 @@ using the Query API with IAM, see Making query requests in the IAM User Guide. - `policy_document`: The policy document. You must provide policies in JSON format in IAM. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. CloudFormation always converts a YAML policy to JSON format before - submitting it to = IAM. The regex pattern used to validate this parameter is a string of + submitting it to IAM. The regex pattern used to validate this parameter is a string of characters consisting of the following: Any printable ASCII character ranging from the space character (u0020) through the end of the ASCII character range The printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF) The @@ -5739,16 +5777,15 @@ end Adds or updates an inline policy document that is embedded in the specified IAM role. When you embed an inline policy in a role, the inline policy is used as part of the role's access (permissions) policy. The role's trust policy is created at the same time as the -role, using CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy. -For more information about IAM roles, see Using roles to delegate permissions and federate -identities. A role can also have a managed policy attached to it. To attach a managed -policy to a role, use AttachRolePolicy. To create a new managed policy, use CreatePolicy. -For information about policies, see Managed policies and inline policies in the IAM User -Guide. For information about the maximum number of inline policies that you can embed with -a role, see IAM and STS quotas in the IAM User Guide. Because policy documents can be -large, you should use POST rather than GET when calling PutRolePolicy. For general -information about using the Query API with IAM, see Making query requests in the IAM User -Guide. +role, using CreateRole . You can update a role's trust policy using +UpdateAssumeRolePolicy . For more information about roles, see IAM roles in the IAM User +Guide. A role can also have a managed policy attached to it. To attach a managed policy to +a role, use AttachRolePolicy . To create a new managed policy, use CreatePolicy . For +information about policies, see Managed policies and inline policies in the IAM User Guide. +For information about the maximum number of inline policies that you can embed with a role, +see IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you +should use POST rather than GET when calling PutRolePolicy. For general information about +using the Query API with IAM, see Making query requests in the IAM User Guide. # Arguments - `policy_document`: The policy document. You must provide policies in JSON format in IAM. @@ -5871,7 +5908,7 @@ end Adds or updates an inline policy document that is embedded in the specified IAM user. An IAM user can also have a managed policy attached to it. To attach a managed policy to a -user, use AttachUserPolicy. 
To create a new managed policy, use CreatePolicy. For +user, use AttachUserPolicy . To create a new managed policy, use CreatePolicy . For information about policies, see Managed policies and inline policies in the IAM User Guide. For information about the maximum number of inline policies that you can embed in a user, see IAM and STS quotas in the IAM User Guide. Because policy documents can be large, you @@ -5996,8 +6033,8 @@ Removes the specified IAM role from the specified EC2 instance profile. Make su do not have any Amazon EC2 instances running with the role you are about to remove from the instance profile. Removing a role from an instance profile that is associated with a running instance might break any applications running on the instance. For more -information about IAM roles, see Working with roles. For more information about instance -profiles, see About instance profiles. +information about roles, see IAM roles in the IAM User Guide. For more information about +instance profiles, see Using instance profiles in the IAM User Guide. # Arguments - `instance_profile_name`: The name of the instance profile to update. This parameter @@ -7832,14 +7869,14 @@ are not merged.) Typically, you need to update a thumbprint only when the identi certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated. Amazon Web Services secures -communication with some OIDC identity providers (IdPs) through our library of trusted +communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP -server certificate. These OIDC IdPs include Google, Auth0, and those that use an Amazon S3 -bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint -remains in your configuration, but is no longer used for validation. Trust for the OIDC -provider is derived from the provider certificate and is validated by the thumbprint. -Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint -operation to highly privileged users. +server certificate. These OIDC IdPs include Auth0, GitHub, Google, and those that use an +Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy +thumbprint remains in your configuration, but is no longer used for validation. Trust for +the OIDC provider is derived from the provider certificate and is validated by the +thumbprint. Therefore, it is best to limit access to the +UpdateOpenIDConnectProviderThumbprint operation to highly privileged users. # Arguments - `open_idconnect_provider_arn`: The Amazon Resource Name (ARN) of the IAM OIDC provider diff --git a/src/services/inspector2.jl b/src/services/inspector2.jl index 2db5d98077..239582bf6f 100644 --- a/src/services/inspector2.jl +++ b/src/services/inspector2.jl @@ -71,6 +71,45 @@ function batch_get_account_status( ) end +""" + batch_get_code_snippet(finding_arns) + batch_get_code_snippet(finding_arns, params::Dict{String,<:Any}) + +Retrieves code snippets from findings that Amazon Inspector detected code vulnerabilities +in. + +# Arguments +- `finding_arns`: An array of finding ARNs for the findings you want to retrieve code + snippets from. 
+ +""" +function batch_get_code_snippet( + findingArns; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/codesnippet/batchget", + Dict{String,Any}("findingArns" => findingArns); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_code_snippet( + findingArns, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/codesnippet/batchget", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("findingArns" => findingArns), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_get_free_trial_info(account_ids) batch_get_free_trial_info(account_ids, params::Dict{String,<:Any}) @@ -218,6 +257,41 @@ function cancel_findings_report( ) end +""" + cancel_sbom_export(report_id) + cancel_sbom_export(report_id, params::Dict{String,<:Any}) + +Cancels a software bill of materials (SBOM) report. + +# Arguments +- `report_id`: The report ID of the SBOM export to cancel. + +""" +function cancel_sbom_export(reportId; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/sbomexport/cancel", + Dict{String,Any}("reportId" => reportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function cancel_sbom_export( + reportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/sbomexport/cancel", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("reportId" => reportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_filter(action, filter_criteria, name) create_filter(action, filter_criteria, name, params::Dict{String,<:Any}) @@ -324,6 +398,55 @@ function create_findings_report( ) end +""" + create_sbom_export(report_format, s3_destination) + create_sbom_export(report_format, s3_destination, params::Dict{String,<:Any}) + +Creates a software bill of materials (SBOM) report. + +# Arguments +- `report_format`: The output format for the software bill of materials (SBOM) report. +- `s3_destination`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"resourceFilterCriteria"`: The resource filter criteria for the software bill of + materials (SBOM) report. +""" +function create_sbom_export( + reportFormat, s3Destination; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "POST", + "/sbomexport/create", + Dict{String,Any}("reportFormat" => reportFormat, "s3Destination" => s3Destination); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_sbom_export( + reportFormat, + s3Destination, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/sbomexport/create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "reportFormat" => reportFormat, "s3Destination" => s3Destination + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_filter(arn) delete_filter(arn, params::Dict{String,<:Any}) @@ -666,6 +789,49 @@ function get_ec2_deep_inspection_configuration( ) end +""" + get_encryption_key(resource_type, scan_type) + get_encryption_key(resource_type, scan_type, params::Dict{String,<:Any}) + +Gets an encryption key. + +# Arguments +- `resource_type`: The resource type the key encrypts. 
+- `scan_type`: The scan type the key encrypts. + +""" +function get_encryption_key( + resourceType, scanType; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "GET", + "/encryptionkey/get", + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_encryption_key( + resourceType, + scanType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "GET", + "/encryptionkey/get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_findings_report_status() get_findings_report_status(params::Dict{String,<:Any}) @@ -732,6 +898,41 @@ function get_member( ) end +""" + get_sbom_export(report_id) + get_sbom_export(report_id, params::Dict{String,<:Any}) + +Gets details of a software bill of materials (SBOM) report. + +# Arguments +- `report_id`: The report ID of the SBOM export to get details for. + +""" +function get_sbom_export(reportId; aws_config::AbstractAWSConfig=global_aws_config()) + return inspector2( + "POST", + "/sbomexport/get", + Dict{String,Any}("reportId" => reportId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_sbom_export( + reportId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "POST", + "/sbomexport/get", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("reportId" => reportId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_account_permissions() list_account_permissions(params::Dict{String,<:Any}) @@ -1086,6 +1287,50 @@ function list_usage_totals( ) end +""" + reset_encryption_key(resource_type, scan_type) + reset_encryption_key(resource_type, scan_type, params::Dict{String,<:Any}) + +Resets an encryption key. After the key is reset your resources will be encrypted by an +Amazon Web Services owned key. + +# Arguments +- `resource_type`: The resource type the key encrypts. +- `scan_type`: The scan type the key encrypts. + +""" +function reset_encryption_key( + resourceType, scanType; aws_config::AbstractAWSConfig=global_aws_config() +) + return inspector2( + "PUT", + "/encryptionkey/reset", + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reset_encryption_key( + resourceType, + scanType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return inspector2( + "PUT", + "/encryptionkey/reset", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceType" => resourceType, "scanType" => scanType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_vulnerabilities(filter_criteria) search_vulnerabilities(filter_criteria, params::Dict{String,<:Any}) @@ -1281,6 +1526,58 @@ function update_ec2_deep_inspection_configuration( ) end +""" + update_encryption_key(kms_key_id, resource_type, scan_type) + update_encryption_key(kms_key_id, resource_type, scan_type, params::Dict{String,<:Any}) + +Updates an encryption key. A ResourceNotFoundException means that an AWS owned key is being +used for encryption. + +# Arguments +- `kms_key_id`: A KMS key ID for the encryption key. 
+- `resource_type`: The resource type for the encryption key.
+- `scan_type`: The scan type for the encryption key.
+
+"""
+function update_encryption_key(
+    kmsKeyId, resourceType, scanType; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return inspector2(
+        "PUT",
+        "/encryptionkey/update",
+        Dict{String,Any}(
+            "kmsKeyId" => kmsKeyId, "resourceType" => resourceType, "scanType" => scanType
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_encryption_key(
+    kmsKeyId,
+    resourceType,
+    scanType,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return inspector2(
+        "PUT",
+        "/encryptionkey/update",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "kmsKeyId" => kmsKeyId,
+                    "resourceType" => resourceType,
+                    "scanType" => scanType,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_filter(filter_arn)
     update_filter(filter_arn, params::Dict{String,<:Any})
diff --git a/src/services/internetmonitor.jl b/src/services/internetmonitor.jl
index a8371f117f..1268a5301f 100644
--- a/src/services/internetmonitor.jl
+++ b/src/services/internetmonitor.jl
@@ -29,6 +29,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"ClientToken"`: A unique, case-sensitive string of up to 64 ASCII characters that you
   specify to make an idempotent API request. Don't reuse the same client token for other API
   requests.
+- `"HealthEventsConfig"`: Defines the health event threshold percentages, for performance
+  score and availability score. Internet Monitor creates a health event when there's an
+  internet issue that affects your application end users where a health score percentage is
+  at or below a set threshold. If you don't set a health event threshold, the default value
+  is 95%.
 - `"InternetMeasurementsLogDelivery"`: Publish internet measurements for Internet Monitor
   to an Amazon S3 bucket in addition to CloudWatch Logs.
 - `"MaxCityNetworksToMonitor"`: The maximum number of city-networks to monitor for your
@@ -397,6 +402,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"ClientToken"`: A unique, case-sensitive string of up to 64 ASCII characters that you
   specify to make an idempotent API request. You should not reuse the same client token for
   other API requests.
+- `"HealthEventsConfig"`: The list of health event thresholds. A health event threshold
+  percentage, for performance and availability, determines when Internet Monitor creates a
+  health event when there's an internet issue that affects your application end users.
 - `"InternetMeasurementsLogDelivery"`: Publish internet measurements for Internet Monitor
   to another location, such as an Amazon S3 bucket. The measurements are also published to
   Amazon CloudWatch Logs.
diff --git a/src/services/ivs.jl b/src/services/ivs.jl
index eefc45caa5..fb2abc1d4f 100644
--- a/src/services/ivs.jl
+++ b/src/services/ivs.jl
@@ -66,6 +66,44 @@ function batch_get_stream_key(
     )
 end
 
+"""
+    batch_start_viewer_session_revocation(viewer_sessions)
+    batch_start_viewer_session_revocation(viewer_sessions, params::Dict{String,<:Any})
+
+Performs StartViewerSessionRevocation on multiple channel ARN and viewer ID pairs
+simultaneously.
+
+# Arguments
+- `viewer_sessions`: Array of viewer sessions, one per channel-ARN and viewer-ID pair. 
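A minimal usage sketch for the batch_start_viewer_session_revocation wrapper whose methods follow; the channel ARN and viewer ID are hypothetical placeholders, and the per-entry field names (channelArn, viewerId) are assumed from the single-session StartViewerSessionRevocation parameters:

using AWS
# One entry per channel-ARN and viewer-ID pair (values are hypothetical placeholders).
sessions = [
    Dict(
        "channelArn" => "arn:aws:ivs:us-west-2:111122223333:channel/exampleChannel",
        "viewerId" => "example-viewer-1",
    ),
]
resp = batch_start_viewer_session_revocation(sessions)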
+ +""" +function batch_start_viewer_session_revocation( + viewerSessions; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/BatchStartViewerSessionRevocation", + Dict{String,Any}("viewerSessions" => viewerSessions); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_start_viewer_session_revocation( + viewerSessions, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs( + "POST", + "/BatchStartViewerSessionRevocation", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("viewerSessions" => viewerSessions), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_channel() create_channel(params::Dict{String,<:Any}) @@ -164,6 +202,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"recordingReconnectWindowSeconds"`: If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together. Default: 0. +- `"renditionConfiguration"`: Object that describes which renditions should be recorded for + a stream. - `"tags"`: Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific @@ -932,6 +972,58 @@ function put_metadata( ) end +""" + start_viewer_session_revocation(channel_arn, viewer_id) + start_viewer_session_revocation(channel_arn, viewer_id, params::Dict{String,<:Any}) + +Starts the process of revoking the viewer session associated with a specified channel ARN +and viewer ID. Optionally, you can provide a version to revoke viewer sessions less than +and including that version. For instructions on associating a viewer ID with a viewer +session, see Setting Up Private Channels. + +# Arguments +- `channel_arn`: The ARN of the channel associated with the viewer session to revoke. +- `viewer_id`: The ID of the viewer associated with the viewer session to revoke. Do not + use this field for personally identifying, confidential, or sensitive information. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"viewerSessionVersionsLessThanOrEqualTo"`: An optional filter on which versions of the + viewer session to revoke. All versions less than or equal to the specified version will be + revoked. Default: 0. 
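A minimal usage sketch for the start_viewer_session_revocation wrapper documented above (its methods follow); the channel ARN and viewer ID are hypothetical placeholders:

using AWS
channel_arn = "arn:aws:ivs:us-west-2:111122223333:channel/exampleChannel"  # hypothetical
# Revoke this viewer's sessions, limiting revocation to session versions <= 3.
resp = start_viewer_session_revocation(
    channel_arn,
    "example-viewer-1",
    Dict("viewerSessionVersionsLessThanOrEqualTo" => 3),
)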
+""" +function start_viewer_session_revocation( + channelArn, viewerId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs( + "POST", + "/StartViewerSessionRevocation", + Dict{String,Any}("channelArn" => channelArn, "viewerId" => viewerId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_viewer_session_revocation( + channelArn, + viewerId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs( + "POST", + "/StartViewerSessionRevocation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("channelArn" => channelArn, "viewerId" => viewerId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_stream(channel_arn) stop_stream(channel_arn, params::Dict{String,<:Any}) diff --git a/src/services/kafka.jl b/src/services/kafka.jl index 79bbe07693..faceddbc39 100644 --- a/src/services/kafka.jl +++ b/src/services/kafka.jl @@ -575,6 +575,41 @@ function describe_cluster_operation( ) end +""" + describe_cluster_operation_v2(cluster_operation_arn) + describe_cluster_operation_v2(cluster_operation_arn, params::Dict{String,<:Any}) + + + Returns a description of the cluster operation specified by the ARN. + +# Arguments +- `cluster_operation_arn`: ARN of the cluster operation to describe. + +""" +function describe_cluster_operation_v2( + clusterOperationArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafka( + "GET", + "/api/v2/operations/$(clusterOperationArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_cluster_operation_v2( + clusterOperationArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "GET", + "/api/v2/operations/$(clusterOperationArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_cluster_v2(cluster_arn) describe_cluster_v2(cluster_arn, params::Dict{String,<:Any}) @@ -918,6 +953,46 @@ function list_cluster_operations( ) end +""" + list_cluster_operations_v2(cluster_arn) + list_cluster_operations_v2(cluster_arn, params::Dict{String,<:Any}) + + + Returns a list of all the operations that have been performed on the specified +MSK cluster. + +# Arguments +- `cluster_arn`: The arn of the cluster whose operations are being requested. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maxResults of the query. +- `"nextToken"`: The nextToken of the query. +""" +function list_cluster_operations_v2( + clusterArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kafka( + "GET", + "/api/v2/clusters/$(clusterArn)/operations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_cluster_operations_v2( + clusterArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kafka( + "GET", + "/api/v2/clusters/$(clusterArn)/operations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_clusters() list_clusters(params::Dict{String,<:Any}) diff --git a/src/services/kendra.jl b/src/services/kendra.jl index 4f8173e822..ef2b7b0171 100644 --- a/src/services/kendra.jl +++ b/src/services/kendra.jl @@ -591,9 +591,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Description"`: A description for the FAQ. - `"FileFormat"`: The format of the FAQ input file. 
You can choose between a basic CSV format, a CSV format that includes customs attributes in a header, and a JSON format that - includes custom attributes. The format must match the format of the file stored in the S3 - bucket identified in the S3Path parameter. For more information, see Adding questions and - answers. + includes custom attributes. The default format is CSV. The format must match the format of + the file stored in the S3 bucket identified in the S3Path parameter. For more information, + see Adding questions and answers. - `"LanguageCode"`: The code for a language. This allows you to support a language for the FAQ document. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English. @@ -2473,50 +2473,51 @@ end query(index_id) query(index_id, params::Dict{String,<:Any}) -Searches an active index. Use this API to search your documents using query. The Query API -enables to do faceted search and to filter results based on document attributes. It also -enables you to provide user context that Amazon Kendra uses to enforce document access -control in the search results. Amazon Kendra searches your index for text content and -question and answer (FAQ) content. By default the response contains three types of results. - Relevant passages Matching FAQs Relevant documents You can specify that the query -return only one type of result using the QueryResultTypeFilter parameter. Each query -returns the 100 most relevant results. +Searches an index given an input query. You can configure boosting or relevance tuning at +the query level to override boosting at the index level, filter based on document +fields/attributes and faceted search, and filter based on the user or their group access to +documents. You can also include certain fields in the response that might provide useful +additional information. A query response contains three types of results. Relevant +suggested answers. The answers can be either a text excerpt or table excerpt. The answer +can be highlighted in the excerpt. Matching FAQs or questions-answer from your FAQ file. + Relevant documents. This result type includes an excerpt of the document with the document +title. The searched terms can be highlighted in the excerpt. You can specify that the +query return only one type of result using the QueryResultTypeFilter parameter. Each query +returns the 100 most relevant results. If you filter result type to only question-answers, +a maximum of four results are returned. If you filter result type to only answers, a +maximum of three results are returned. # Arguments -- `index_id`: The identifier of the index to search. The identifier is returned in the - response from the CreateIndex API. +- `index_id`: The identifier of the index for the search. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AttributeFilter"`: Enables filtered searches based on document attributes. You can only +- `"AttributeFilter"`: Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters - parameters contain a list of other filters. The AttributeFilter parameter enables you to + parameters contain a list of other filters. The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results. 
- `"DocumentRelevanceOverrideConfigurations"`: Overrides relevance tuning configurations of - fields or attributes set at the index level. If you use this API to override the relevance + fields/attributes set at the index level. If you use this API to override the relevance tuning configured at the index level, but there is no relevance tuning configured at the index level, then Amazon Kendra does not apply any relevance tuning. If there is relevance - tuning configured at the index level, but you do not use this API to override any relevance - tuning in the index, then Amazon Kendra uses the relevance tuning that is configured at the - index level. If there is relevance tuning configured for fields at the index level, but you - use this API to override only some of these fields, then for the fields you did not - override, the importance is set to 1. -- `"Facets"`: An array of documents attributes. Amazon Kendra returns a count for each - attribute key specified. This helps your users narrow their search. + tuning configured for fields at the index level, and you use this API to override only some + of these fields, then for the fields you did not override, the importance is set to 1. +- `"Facets"`: An array of documents fields/attributes for faceted search. Amazon Kendra + returns a count for each field key specified. This helps your users narrow their search. - `"PageNumber"`: Query results are returned in pages the size of the PageSize parameter. By default, Amazon Kendra returns the first page of results. Use this parameter to get result pages after the first one. - `"PageSize"`: Sets the number of results that are returned in each page of results. The default page size is 10. The maximum number of results returned is 100. If you ask for more than 100 results, only 100 are returned. -- `"QueryResultTypeFilter"`: Sets the type of query. Only results for the specified query - type are returned. +- `"QueryResultTypeFilter"`: Sets the type of query result or response. Only results for + the specified type are returned. - `"QueryText"`: The input query text for the search. Amazon Kendra truncates queries at 30 token words, which excludes punctuation and stop words. Truncation still applies if you use Boolean or more advanced, complex queries. -- `"RequestedDocumentAttributes"`: An array of document attributes to include in the - response. You can limit the response to include certain document attributes. By default all +- `"RequestedDocumentAttributes"`: An array of document fields/attributes to include in the + response. You can limit the response to include certain document fields. By default, all document attributes are included in the response. - `"SortingConfiguration"`: Provides information that determines how the results of the query are sorted. You can set the field that Amazon Kendra should sort the results on, and @@ -2549,6 +2550,80 @@ function query( ) end +""" + retrieve(index_id, query_text) + retrieve(index_id, query_text, params::Dict{String,<:Any}) + +Retrieves relevant passages or text excerpts given an input query. This API is similar to +the Query API. However, by default, the Query API only returns excerpt passages of up to +100 token words. With the Retrieve API, you can retrieve longer passages of up to 200 token +words and up to 100 semantically relevant passages. This doesn't include question-answer or +FAQ type responses from your index. 
The passages are text excerpts that can be semantically +extracted from multiple documents and multiple parts of the same document. If in extreme +cases your documents produce no relevant passages using the Retrieve API, you can +alternatively use the Query API. You can also do the following: Override boosting at the +index level Filter based on document fields or attributes Filter based on the user or +their group access to documents You can also include certain fields in the response that +might provide useful additional information. + +# Arguments +- `index_id`: The identifier of the index to retrieve relevant passages for the search. +- `query_text`: The input query text to retrieve relevant passages for the search. Amazon + Kendra truncates queries at 30 token words, which excludes punctuation and stop words. + Truncation still applies if you use Boolean or more advanced, complex queries. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttributeFilter"`: Filters search results by document fields/attributes. You can only + provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters + parameters contain a list of other filters. The AttributeFilter parameter means you can + create a set of filtering rules that a document must satisfy to be included in the query + results. +- `"DocumentRelevanceOverrideConfigurations"`: Overrides relevance tuning configurations of + fields/attributes set at the index level. If you use this API to override the relevance + tuning configured at the index level, but there is no relevance tuning configured at the + index level, then Amazon Kendra does not apply any relevance tuning. If there is relevance + tuning configured for fields at the index level, and you use this API to override only some + of these fields, then for the fields you did not override, the importance is set to 1. +- `"PageNumber"`: Retrieved relevant passages are returned in pages the size of the + PageSize parameter. By default, Amazon Kendra returns the first page of results. Use this + parameter to get result pages after the first one. +- `"PageSize"`: Sets the number of retrieved relevant passages that are returned in each + page of results. The default page size is 10. The maximum number of results returned is + 100. If you ask for more than 100 results, only 100 are returned. +- `"RequestedDocumentAttributes"`: A list of document fields/attributes to include in the + response. You can limit the response to include certain document fields. By default, all + document fields are included in the response. +- `"UserContext"`: The user context token or user and group information. 
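As a reader aid only (not part of the generated file), here is a minimal sketch of calling the new Retrieve operation through AWS.jl's `@service` interface; the index identifier and query text are placeholders, and the optional-parameter names are taken from the list above.

using AWS: @service
@service Kendra

# Placeholder index identifier; substitute your own.
index_id = "0123abcd-0000-0000-0000-000000000000"

# Basic call: returns up to 100 semantically relevant passages.
resp = Kendra.retrieve(index_id, "How do I configure relevance tuning?")

# Optional parameters are passed as a trailing Dict, as with the Query API.
resp = Kendra.retrieve(
    index_id,
    "How do I configure relevance tuning?",
    Dict("PageSize" => 25, "PageNumber" => 1),
)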
+""" +function retrieve(IndexId, QueryText; aws_config::AbstractAWSConfig=global_aws_config()) + return kendra( + "Retrieve", + Dict{String,Any}("IndexId" => IndexId, "QueryText" => QueryText); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function retrieve( + IndexId, + QueryText, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kendra( + "Retrieve", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IndexId" => IndexId, "QueryText" => QueryText), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_data_source_sync_job(id, index_id) start_data_source_sync_job(id, index_id, params::Dict{String,<:Any}) diff --git a/src/services/kinesis_video.jl b/src/services/kinesis_video.jl index 90cbc49462..b10303df43 100644 --- a/src/services/kinesis_video.jl +++ b/src/services/kinesis_video.jl @@ -75,7 +75,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys implementation, Kinesis Video Streams does not use this name. - `"KmsKeyId"`: The ID of the Key Management Service (KMS) key that you want Kinesis Video Streams to use to encrypt stream data. If no key ID is specified, the default, Kinesis - Video-managed key (aws/kinesisvideo) is used. For more information, see DescribeKey. + Video-managed key (Amazon Web Services/kinesisvideo) is used. For more information, see + DescribeKey. - `"MediaType"`: The media type of the stream. Consumers of the stream can use this information when processing the stream. For more information about media types, see Media Types. If you choose to specify the MediaType, see Naming Requirements for guidelines. @@ -109,6 +110,45 @@ function create_stream( ) end +""" + delete_edge_configuration() + delete_edge_configuration(params::Dict{String,<:Any}) + +An asynchronous API that deletes a stream’s existing edge configuration, as well as the +corresponding media from the Edge Agent. When you invoke this API, the sync status is set +to DELETING. A deletion process starts, in which active edge jobs are stopped and all media +is deleted from the edge device. The time to delete varies, depending on the total amount +of stored media. If the deletion process fails, the sync status changes to DELETE_FAILED. +You will need to re-try the deletion. When the deletion process has completed successfully, +the edge configuration is no longer accessible. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"StreamARN"`: The Amazon Resource Name (ARN) of the stream. Specify either the + StreamName or the StreamARN. +- `"StreamName"`: The name of the stream from which to delete the edge configuration. + Specify either the StreamName or the StreamARN. 
+""" +function delete_edge_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return kinesis_video( + "POST", + "/deleteEdgeConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_edge_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/deleteEdgeConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_signaling_channel(channel_arn) delete_signaling_channel(channel_arn, params::Dict{String,<:Any}) @@ -205,8 +245,10 @@ end describe_edge_configuration(params::Dict{String,<:Any}) Describes a stream’s edge configuration that was set using the -StartEdgeConfigurationUpdate API. Use this API to get the status of the configuration if -the configuration is in sync with the Edge Agent. +StartEdgeConfigurationUpdate API and the latest status of the edge agent's recorder and +uploader jobs. Use this API to get the status of the configuration to determine if the +configuration is in sync with the Edge Agent. Use this API to evaluate the health of the +Edge Agent. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -275,9 +317,8 @@ end describe_mapped_resource_configuration() describe_mapped_resource_configuration(params::Dict{String,<:Any}) -Returns the most current information about the stream. Either streamName or streamARN -should be provided in the input. Returns the most current information about the stream. The -streamName or streamARN should be provided in the input. +Returns the most current information about the stream. The streamName or streamARN should +be provided in the input. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -531,6 +572,52 @@ function get_signaling_channel_endpoint( ) end +""" + list_edge_agent_configurations(hub_device_arn) + list_edge_agent_configurations(hub_device_arn, params::Dict{String,<:Any}) + +Returns an array of edge configurations associated with the specified Edge Agent. In the +request, you must specify the Edge Agent HubDeviceArn. + +# Arguments +- `hub_device_arn`: The \"Internet of Things (IoT) Thing\" Arn of the edge agent. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of edge configurations to return in the response. The + default is 5. +- `"NextToken"`: If you specify this parameter, when the result of a + ListEdgeAgentConfigurations operation is truncated, the call returns the NextToken in the + response. To get another batch of edge configurations, provide this token in your next + request. 
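Since this operation pages results with MaxResults and NextToken, a small pagination sketch may help; it is not part of the generated file, the hub device ARN is a placeholder, and the "EdgeConfigs" response key is an assumption about the response shape.

using AWS: @service
@service Kinesis_Video

hub_device_arn = "arn:aws:iot:us-west-2:111122223333:thing/ExampleEdgeAgent"  # placeholder

configs = Any[]
params = Dict{String,Any}("MaxResults" => 5)
while true
    resp = Kinesis_Video.list_edge_agent_configurations(hub_device_arn, params)
    append!(configs, get(resp, "EdgeConfigs", []))   # "EdgeConfigs" is an assumed field name
    token = get(resp, "NextToken", nothing)
    isnothing(token) && break
    params["NextToken"] = token
end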
+""" +function list_edge_agent_configurations( + HubDeviceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/listEdgeAgentConfigurations", + Dict{String,Any}("HubDeviceArn" => HubDeviceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_edge_agent_configurations( + HubDeviceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_video( + "POST", + "/listEdgeAgentConfigurations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("HubDeviceArn" => HubDeviceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_signaling_channels() list_signaling_channels(params::Dict{String,<:Any}) diff --git a/src/services/kms.jl b/src/services/kms.jl index 128206e809..9ad05d959e 100644 --- a/src/services/kms.jl +++ b/src/services/kms.jl @@ -426,6 +426,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys constraint cannot exceed 384 characters. For information about grant constraints, see Using grant constraints in the Key Management Service Developer Guide. For more information about encryption context, see Encryption context in the Key Management Service Developer Guide . +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -774,6 +777,9 @@ operations: Encrypt GenerateDataKey GenerateDataKeyPair ReEncryp # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionAlgorithm"`: Specifies the encryption algorithm that will be used to decrypt the ciphertext. Specify the same algorithm that was used to encrypt the data. If you specify a different algorithm, the Decrypt operation fails. This parameter is required only @@ -1417,6 +1423,9 @@ permissions: kms:Encrypt (key policy) Related operations: Decrypt Gener # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionAlgorithm"`: Specifies the encryption algorithm that KMS will use to encrypt the plaintext message. The algorithm must be compatible with the KMS key that you specify. This parameter is required only for asymmetric KMS keys. The default value, @@ -1533,6 +1542,9 @@ GenerateDataKeyPairWithoutPlaintext GenerateDataKeyWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. 
- `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the data key. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output. An encryption @@ -1660,6 +1672,9 @@ GenerateDataKeyWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the private key in the data key pair. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other @@ -1774,6 +1789,9 @@ GenerateDataKeyPair GenerateDataKeyWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the private key in the data key pair. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other @@ -1874,6 +1892,9 @@ GenerateDataKeyPairWithoutPlaintext # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"EncryptionContext"`: Specifies the encryption context that will be used when encrypting the data key. Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output. An encryption @@ -1953,6 +1974,9 @@ kms:GenerateMac (key policy) Related operations: VerifyMac # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -2923,6 +2947,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended. For more information, see Encryption context in the Key Management Service Developer Guide. +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. 
Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -3169,6 +3196,9 @@ ListRetirableGrants RevokeGrant # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantId"`: Identifies the grant to retire. To get the grant ID, use CreateGrant, ListGrants, or ListRetirableGrants. Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 @@ -3218,6 +3248,11 @@ ListGrants ListRetirableGrants RetireGrant arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. """ function revoke_grant(GrantId, KeyId; aws_config::AbstractAWSConfig=global_aws_config()) return kms( @@ -3258,30 +3293,29 @@ KMS key. After the waiting period ends, KMS deletes the KMS key, its key materia KMS data associated with it, including all aliases that refer to it. Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region -replica key, or an asymmetric or HMAC KMS key with imported key material[BUGBUG-link to -importing-keys-managing.html#import-delete-key.) To prevent the use of a KMS key without -deleting it, use DisableKey. You can schedule the deletion of a multi-Region primary key -and its replica keys at any time. However, KMS will not delete a multi-Region primary key -with existing replica keys. If you schedule the deletion of a primary key with replicas, -its key state changes to PendingReplicaDeletion and it cannot be replicated or used in -cryptographic operations. This status can continue indefinitely. When the last of its -replicas keys is deleted (not just scheduled), the key state of the primary key changes to -PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see -Deleting multi-Region keys in the Key Management Service Developer Guide. When KMS deletes -a KMS key from an CloudHSM key store, it makes a best effort to delete the associated key -material from the associated CloudHSM cluster. However, you might need to manually delete -the orphaned key material from the cluster and its backups. Deleting a KMS key from an -external key store has no effect on the associated external key. However, for both types of -custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt -ciphertext encrypted under the KMS key by using only its associated external key or -CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a -new KMS key with the same key material. For more information about scheduling a KMS key for -deletion, see Deleting KMS keys in the Key Management Service Developer Guide. 
The KMS key -that you use for this operation must be in a compatible key state. For details, see Key -states of KMS keys in the Key Management Service Developer Guide. Cross-account use: No. -You cannot perform this operation on a KMS key in a different Amazon Web Services account. -Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations -CancelKeyDeletion DisableKey +replica key, or an asymmetric or HMAC KMS key with imported key material.) To prevent the +use of a KMS key without deleting it, use DisableKey. You can schedule the deletion of a +multi-Region primary key and its replica keys at any time. However, KMS will not delete a +multi-Region primary key with existing replica keys. If you schedule the deletion of a +primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be +replicated or used in cryptographic operations. This status can continue indefinitely. When +the last of its replicas keys is deleted (not just scheduled), the key state of the primary +key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For +details, see Deleting multi-Region keys in the Key Management Service Developer Guide. When +KMS deletes a KMS key from an CloudHSM key store, it makes a best effort to delete the +associated key material from the associated CloudHSM cluster. However, you might need to +manually delete the orphaned key material from the cluster and its backups. Deleting a KMS +key from an external key store has no effect on the associated external key. However, for +both types of custom key stores, deleting a KMS key is destructive and irreversible. You +cannot decrypt ciphertext encrypted under the KMS key by using only its associated external +key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by +creating a new KMS key with the same key material. For more information about scheduling a +KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide. +The KMS key that you use for this operation must be in a compatible key state. For details, +see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account +use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services +account. Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations + CancelKeyDeletion DisableKey # Arguments - `key_id`: The unique identifier of the KMS key to delete. Specify the key ID or key ARN @@ -3374,6 +3408,9 @@ parameter. Required permissions: kms:Sign (key policy) Related operations: Ver # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -3949,6 +3986,9 @@ parameter. Required permissions: kms:Verify (key policy) Related operations: # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. 
To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service @@ -4053,6 +4093,9 @@ Related operations: GenerateMac # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: Checks if your request will succeed. DryRun is an optional parameter. To + learn more about how to use this parameter, see Testing your KMS API calls in the Key + Management Service Developer Guide. - `"GrantTokens"`: A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service diff --git a/src/services/lakeformation.jl b/src/services/lakeformation.jl index 4cb56b67ba..955ed8a316 100644 --- a/src/services/lakeformation.jl +++ b/src/services/lakeformation.jl @@ -986,8 +986,8 @@ function get_table_objects( end """ - get_temporary_glue_partition_credentials(partition, supported_permission_types, table_arn) - get_temporary_glue_partition_credentials(partition, supported_permission_types, table_arn, params::Dict{String,<:Any}) + get_temporary_glue_partition_credentials(partition, table_arn) + get_temporary_glue_partition_credentials(partition, table_arn, params::Dict{String,<:Any}) This API is identical to GetTemporaryTableCredentials except that this is used when the target Data Catalog resource is of type Partition. Lake Formation restricts the permission @@ -996,8 +996,6 @@ single Amazon S3 prefix. # Arguments - `partition`: A list of partition values identifying a single partition. -- `supported_permission_types`: A list of supported permission types for the partition. - Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. - `table_arn`: The ARN of the partitions' table. # Optional Parameters @@ -1008,28 +1006,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the temporary credentials. - `"Permissions"`: Filters the request based on the user having been granted a list of specified permissions on the requested resource(s). +- `"SupportedPermissionTypes"`: A list of supported permission types for the partition. + Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. 
""" function get_temporary_glue_partition_credentials( - Partition, - SupportedPermissionTypes, - TableArn; - aws_config::AbstractAWSConfig=global_aws_config(), + Partition, TableArn; aws_config::AbstractAWSConfig=global_aws_config() ) return lakeformation( "POST", "/GetTemporaryGluePartitionCredentials", - Dict{String,Any}( - "Partition" => Partition, - "SupportedPermissionTypes" => SupportedPermissionTypes, - "TableArn" => TableArn, - ); + Dict{String,Any}("Partition" => Partition, "TableArn" => TableArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_temporary_glue_partition_credentials( Partition, - SupportedPermissionTypes, TableArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1040,11 +1032,7 @@ function get_temporary_glue_partition_credentials( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "Partition" => Partition, - "SupportedPermissionTypes" => SupportedPermissionTypes, - "TableArn" => TableArn, - ), + Dict{String,Any}("Partition" => Partition, "TableArn" => TableArn), params, ), ); @@ -1054,8 +1042,8 @@ function get_temporary_glue_partition_credentials( end """ - get_temporary_glue_table_credentials(supported_permission_types, table_arn) - get_temporary_glue_table_credentials(supported_permission_types, table_arn, params::Dict{String,<:Any}) + get_temporary_glue_table_credentials(table_arn) + get_temporary_glue_table_credentials(table_arn, params::Dict{String,<:Any}) Allows a caller in a secure environment to assume a role with permission to access Amazon S3. In order to vend such credentials, Lake Formation assumes the role associated with a @@ -1063,8 +1051,6 @@ registered location, for example an Amazon S3 bucket, with a scope down policy w restricts the access to a single prefix. # Arguments -- `supported_permission_types`: A list of supported permission types for the table. Valid - values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. - `table_arn`: The ARN identifying a table in the Data Catalog for the temporary credentials request. @@ -1076,22 +1062,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the temporary credentials. - `"Permissions"`: Filters the request based on the user having been granted a list of specified permissions on the requested resource(s). +- `"SupportedPermissionTypes"`: A list of supported permission types for the table. Valid + values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION. 
""" function get_temporary_glue_table_credentials( - SupportedPermissionTypes, TableArn; aws_config::AbstractAWSConfig=global_aws_config() + TableArn; aws_config::AbstractAWSConfig=global_aws_config() ) return lakeformation( "POST", "/GetTemporaryGlueTableCredentials", - Dict{String,Any}( - "SupportedPermissionTypes" => SupportedPermissionTypes, "TableArn" => TableArn - ); + Dict{String,Any}("TableArn" => TableArn); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_temporary_glue_table_credentials( - SupportedPermissionTypes, TableArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1100,14 +1085,7 @@ function get_temporary_glue_table_credentials( "POST", "/GetTemporaryGlueTableCredentials", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "SupportedPermissionTypes" => SupportedPermissionTypes, - "TableArn" => TableArn, - ), - params, - ), + mergewith(_merge, Dict{String,Any}("TableArn" => TableArn), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, diff --git a/src/services/lambda.jl b/src/services/lambda.jl index 5182bc5162..c497b643d3 100644 --- a/src/services/lambda.jl +++ b/src/services/lambda.jl @@ -367,10 +367,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SourceAccessConfigurations"`: An array of authentication protocols or VPC components required to secure your event source. - `"StartingPosition"`: The position in a stream from which to start reading. Required for - Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is supported - only for Amazon Kinesis streams and Amazon DocumentDB. + Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for + Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka. - `"StartingPositionTimestamp"`: With StartingPosition set to AT_TIMESTAMP, the time from - which to start reading. + which to start reading. StartingPositionTimestamp cannot be in the future. - `"Topics"`: The name of the Kafka topic. - `"TumblingWindowInSeconds"`: (Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds @@ -711,10 +711,11 @@ end delete_function(function_name, params::Dict{String,<:Any}) Deletes a Lambda function. To delete a specific function version, use the Qualifier -parameter. Otherwise, all versions and aliases are deleted. To delete Lambda event source -mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services and -resources that invoke your function directly, delete the trigger in the service where you -originally configured it. +parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user +to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that +invoke a function, use DeleteEventSourceMapping. For Amazon Web Services and resources that +invoke your function directly, delete the trigger in the service where you originally +configured it. # Arguments - `function_name`: The name of the Lambda function or version. 
Name formats Function diff --git a/src/services/lex_models_v2.jl b/src/services/lex_models_v2.jl index b48a4d2a65..79d2bb3166 100644 --- a/src/services/lex_models_v2.jl +++ b/src/services/lex_models_v2.jl @@ -2705,6 +2705,262 @@ function list_imports( ) end +""" + list_intent_metrics(bot_id, end_date_time, metrics, start_date_time) + list_intent_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) + +Retrieves summary metrics for the intents in your bot. The following fields are required: + metrics – A list of AnalyticsIntentMetric objects. In each object, use the name field to +specify the metric to calculate, the statistic field to specify whether to calculate the +Sum, Average, or Max number, and the order field to specify whether to sort the results in +Ascending or Descending order. startDateTime and endDateTime – Define a time range for +which you want to retrieve results. Of the optional fields, you can organize the results +in the following ways: Use the filters field to filter the results, the groupBy field to +specify categories by which to group the results, and the binBy field to specify time +intervals by which to group the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. Note that an order field +exists in both binBy and metrics. You can specify only one order in a given request. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve intent metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see intent metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the order by which to organize the results. +- `start_date_time`: The timestamp that marks the beginning of the range of time for which + you want to see intent metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. You can + group by the following criteria: IntentName – The name of the intent. + IntentEndState – The final state of the intent. The possible end states are detailed in + Key definitions in the user guide. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListIntentMetrics operation contains more results + than specified in the maxResults parameter, a token is returned in the response. Use the + returned token in the nextToken parameter of a ListIntentMetrics request to return the next + page of results. For a complete set of results, call the ListIntentMetrics operation until + the nextToken returned in the response is null. 
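For orientation only, a sketch of the call described above: positional arguments follow the generated order (botId, endDateTime, metrics, startDateTime), and each metric entry names the metric, statistic, and sort order. The bot ID, timestamps, and metric name are placeholders/assumptions; check the Lex analytics documentation for accepted values and the expected timestamp format.

using AWS: @service
@service Lex_Models_V2

bot_id = "ABCDEFGHIJ"  # placeholder

# One AnalyticsIntentMetric-style entry: which metric, which statistic, and sort order.
metrics = [Dict("name" => "Count", "statistic" => "Sum", "order" => "Descending")]

resp = Lex_Models_V2.list_intent_metrics(
    bot_id,
    "2023-08-01T00:00:00Z",   # endDateTime (format assumed)
    metrics,
    "2023-07-01T00:00:00Z",   # startDateTime (format assumed)
    Dict("maxResults" => 50),
)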
+""" +function list_intent_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentmetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_intent_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentmetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_intent_paths(bot_id, end_date_time, intent_path, start_date_time) + list_intent_paths(bot_id, end_date_time, intent_path, start_date_time, params::Dict{String,<:Any}) + +Retrieves summary statistics for a path of intents that users take over sessions with your +bot. The following fields are required: startDateTime and endDateTime – Define a time +range for which you want to retrieve results. intentPath – Define an order of intents +for which you want to retrieve metrics. Separate intents in the path with a forward slash. +For example, populate the intentPath field with /BookCar/BookHotel to see details about how +many times users invoked the BookCar and BookHotel intents in that order. Use the +optional filters field to filter the results. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve intent path metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see intent path metrics. +- `intent_path`: The intent path for which you want to retrieve metrics. Use a forward + slash to separate intents in the path. For example: /BookCar /BookCar/BookHotel + /BookHotel/BookCar +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see intent path metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: A list of objects, each describes a condition by which you want to filter + the results. 
+""" +function list_intent_paths( + botId, + endDateTime, + intentPath, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentpaths", + Dict{String,Any}( + "endDateTime" => endDateTime, + "intentPath" => intentPath, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_intent_paths( + botId, + endDateTime, + intentPath, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentpaths", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "intentPath" => intentPath, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_intent_stage_metrics(bot_id, end_date_time, metrics, start_date_time) + list_intent_stage_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) + +Retrieves summary metrics for the stages within intents in your bot. The following fields +are required: metrics – A list of AnalyticsIntentStageMetric objects. In each object, +use the name field to specify the metric to calculate, the statistic field to specify +whether to calculate the Sum, Average, or Max number, and the order field to specify +whether to sort the results in Ascending or Descending order. startDateTime and +endDateTime – Define a time range for which you want to retrieve results. Of the +optional fields, you can organize the results in the following ways: Use the filters +field to filter the results, the groupBy field to specify categories by which to group the +results, and the binBy field to specify time intervals by which to group the results. Use +the maxResults field to limit the number of results to return in a single response and the +nextToken field to return the next batch of results if the response does not return the +full set of results. Note that an order field exists in both binBy and metrics. You can +only specify one order in a given request. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve intent stage metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see intent stage metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the method by which to organize the + results. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see intent stage metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. You can + group by the following criteria: IntentStageName – The name of the intent stage. + SwitchedToIntent – The intent to which the conversation was switched (if any). +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. 
+- `"nextToken"`: If the response from the ListIntentStageMetrics operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListIntentStageMetrics request to + return the next page of results. For a complete set of results, call the + ListIntentStageMetrics operation until the nextToken returned in the response is null. +""" +function list_intent_stage_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentstagemetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_intent_stage_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/intentstagemetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_intents(bot_id, bot_version, locale_id) list_intents(bot_id, bot_version, locale_id, params::Dict{String,<:Any}) @@ -2813,6 +3069,168 @@ function list_recommended_intents( ) end +""" + list_session_analytics_data(bot_id, end_date_time, start_date_time) + list_session_analytics_data(bot_id, end_date_time, start_date_time, params::Dict{String,<:Any}) + +Retrieves a list of metadata for individual user sessions with your bot. The startDateTime +and endDateTime fields are required. These fields define a time range for which you want to +retrieve results. Of the optional fields, you can organize the results in the following +ways: Use the filters field to filter the results and the sortBy field to specify the +values by which to sort the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve session analytics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see session analytics. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see session analytics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListSessionAnalyticsData operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListSessionAnalyticsData request to + return the next page of results. For a complete set of results, call the + ListSessionAnalyticsData operation until the nextToken returned in the response is null. 
+- `"sortBy"`: An object specifying the measure and method by which to sort the session + analytics data. +""" +function list_session_analytics_data( + botId, endDateTime, startDateTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessions", + Dict{String,Any}("endDateTime" => endDateTime, "startDateTime" => startDateTime); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_session_analytics_data( + botId, + endDateTime, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, "startDateTime" => startDateTime + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_session_metrics(bot_id, end_date_time, metrics, start_date_time) + list_session_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) + +Retrieves summary metrics for the user sessions with your bot. The following fields are +required: metrics – A list of AnalyticsSessionMetric objects. In each object, use the +name field to specify the metric to calculate, the statistic field to specify whether to +calculate the Sum, Average, or Max number, and the order field to specify whether to sort +the results in Ascending or Descending order. startDateTime and endDateTime – Define a +time range for which you want to retrieve results. Of the optional fields, you can +organize the results in the following ways: Use the filters field to filter the results, +the groupBy field to specify categories by which to group the results, and the binBy field +to specify time intervals by which to group the results. Use the maxResults field to +limit the number of results to return in a single response and the nextToken field to +return the next batch of results if the response does not return the full set of results. +Note that an order field exists in both binBy and metrics. Currently, you can specify it in +either field, but not in both. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve session metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see session metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the method by which to organize the + results. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see session metrics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. You can + group by the following criteria: ConversationEndState – The final state of the + conversation. The possible end states are detailed in Key definitions in the user guide. + LocaleId – The unique identifier of the bot locale. +- `"maxResults"`: The maximum number of results to return in each page of results. 
If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListSessionMetrics operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListSessionMetrics request to return + the next page of results. For a complete set of results, call the ListSessionMetrics + operation until the nextToken returned in the response is null. +""" +function list_session_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessionmetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_session_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/sessionmetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_slot_types(bot_id, bot_version, locale_id) list_slot_types(bot_id, bot_version, locale_id, params::Dict{String,<:Any}) @@ -3102,6 +3520,177 @@ function list_test_sets( ) end +""" + list_utterance_analytics_data(bot_id, end_date_time, start_date_time) + list_utterance_analytics_data(bot_id, end_date_time, start_date_time, params::Dict{String,<:Any}) + + To use this API operation, your IAM role must have permissions to perform the +ListAggregatedUtterances operation, which provides access to utterance-related analytics. +See Viewing utterance statistics for the IAM policy to apply to the IAM role. Retrieves a +list of metadata for individual user utterances to your bot. The following fields are +required: startDateTime and endDateTime – Define a time range for which you want to +retrieve results. Of the optional fields, you can organize the results in the following +ways: Use the filters field to filter the results and the sortBy field to specify the +values by which to sort the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve utterance analytics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see utterance analytics. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see utterance analytics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. 
+- `"nextToken"`: If the response from the ListUtteranceAnalyticsData operation contains + more results than specified in the maxResults parameter, a token is returned in the + response. Use the returned token in the nextToken parameter of a ListUtteranceAnalyticsData + request to return the next page of results. For a complete set of results, call the + ListUtteranceAnalyticsData operation until the nextToken returned in the response is null. +- `"sortBy"`: An object specifying the measure and method by which to sort the utterance + analytics data. +""" +function list_utterance_analytics_data( + botId, endDateTime, startDateTime; aws_config::AbstractAWSConfig=global_aws_config() +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterances", + Dict{String,Any}("endDateTime" => endDateTime, "startDateTime" => startDateTime); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_utterance_analytics_data( + botId, + endDateTime, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterances", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, "startDateTime" => startDateTime + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_utterance_metrics(bot_id, end_date_time, metrics, start_date_time) + list_utterance_metrics(bot_id, end_date_time, metrics, start_date_time, params::Dict{String,<:Any}) + + To use this API operation, your IAM role must have permissions to perform the +ListAggregatedUtterances operation, which provides access to utterance-related analytics. +See Viewing utterance statistics for the IAM policy to apply to the IAM role. Retrieves +summary metrics for the utterances in your bot. The following fields are required: +metrics – A list of AnalyticsUtteranceMetric objects. In each object, use the name field +to specify the metric to calculate, the statistic field to specify whether to calculate the +Sum, Average, or Max number, and the order field to specify whether to sort the results in +Ascending or Descending order. startDateTime and endDateTime – Define a time range for +which you want to retrieve results. Of the optional fields, you can organize the results +in the following ways: Use the filters field to filter the results, the groupBy field to +specify categories by which to group the results, and the binBy field to specify time +intervals by which to group the results. Use the maxResults field to limit the number of +results to return in a single response and the nextToken field to return the next batch of +results if the response does not return the full set of results. Note that an order field +exists in both binBy and metrics. Currently, you can specify it in either field, but not in +both. + +# Arguments +- `bot_id`: The identifier for the bot for which you want to retrieve utterance metrics. +- `end_date_time`: The date and time that marks the end of the range of time for which you + want to see utterance metrics. +- `metrics`: A list of objects, each of which contains a metric you want to list, the + statistic for the metric you want to return, and the method by which to organize the + results. +- `start_date_time`: The date and time that marks the beginning of the range of time for + which you want to see utterance metrics. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"attributes"`: A list containing attributes related to the utterance that you want the + response to return. The following attributes are possible: LastUsedIntent – The last + used intent at the time of the utterance. +- `"binBy"`: A list of objects, each of which contains specifications for organizing the + results by time. +- `"filters"`: A list of objects, each of which describes a condition by which you want to + filter the results. +- `"groupBy"`: A list of objects, each of which specifies how to group the results. You can + group by the following criteria: UtteranceText – The transcription of the utterance. + UtteranceState – The state of the utterance. The possible states are detailed in Key + definitions in the user guide. +- `"maxResults"`: The maximum number of results to return in each page of results. If there + are fewer results than the maximum page size, only the actual number of results are + returned. +- `"nextToken"`: If the response from the ListUtteranceMetrics operation contains more + results than specified in the maxResults parameter, a token is returned in the response. + Use the returned token in the nextToken parameter of a ListUtteranceMetrics request to + return the next page of results. For a complete set of results, call the + ListUtteranceMetrics operation until the nextToken returned in the response is null. +""" +function list_utterance_metrics( + botId, + endDateTime, + metrics, + startDateTime; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterancemetrics", + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_utterance_metrics( + botId, + endDateTime, + metrics, + startDateTime, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return lex_models_v2( + "POST", + "/bots/$(botId)/analytics/utterancemetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endDateTime" => endDateTime, + "metrics" => metrics, + "startDateTime" => startDateTime, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_associated_transcripts(bot_id, bot_recommendation_id, bot_version, filters, locale_id) search_associated_transcripts(bot_id, bot_recommendation_id, bot_version, filters, locale_id, params::Dict{String,<:Any}) diff --git a/src/services/location.jl b/src/services/location.jl index fb4a0ae3bc..6078bb8e0f 100644 --- a/src/services/location.jl +++ b/src/services/location.jl @@ -269,26 +269,26 @@ end batch_update_device_position(tracker_name, updates) batch_update_device_position(tracker_name, updates, params::Dict{String,<:Any}) -Uploads position update data for one or more devices to a tracker resource. Amazon Location -uses the data when it reports the last known device position and position history. Amazon -Location retains location data for 30 days. Position updates are handled based on the -PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased, -updates are evaluated against linked geofence collections, and location data is stored at a -maximum of one position per 30 second interval. 
If your update frequency is more often than -every 30 seconds, only one update per 30 seconds is stored for each unique device ID. When -PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated -against linked geofence collections only if the device has moved more than 30 m (98.4 ft). -When PositionFiltering is set to AccuracyBased filtering, location data is stored and -evaluated against linked geofence collections only if the device has moved more than the -measured accuracy. For example, if two consecutive updates from a device have a horizontal -accuracy of 5 m and 10 m, the second update is neither stored or evaluated if the device -has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon -Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a -DevicePositionUpdate. +Uploads position update data for one or more devices to a tracker resource (up to 10 +devices per batch). Amazon Location uses the data when it reports the last known device +position and position history. Amazon Location retains location data for 30 days. Position +updates are handled based on the PositionFiltering property of the tracker. When +PositionFiltering is set to TimeBased, updates are evaluated against linked geofence +collections, and location data is stored at a maximum of one position per 30 second +interval. If your update frequency is more often than every 30 seconds, only one update per +30 seconds is stored for each unique device ID. When PositionFiltering is set to +DistanceBased filtering, location data is stored and evaluated against linked geofence +collections only if the device has moved more than 30 m (98.4 ft). When PositionFiltering +is set to AccuracyBased filtering, location data is stored and evaluated against linked +geofence collections only if the device has moved more than the measured accuracy. For +example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 +m, the second update is neither stored or evaluated if the device has moved less than 15 m. +If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default +value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate. # Arguments - `tracker_name`: The name of the tracker resource to update. -- `updates`: Contains the position update details for each device. +- `updates`: Contains the position update details for each device, up to 10 devices. """ function batch_update_device_position( @@ -384,6 +384,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys ValidationException error. If Esri is the provider for your route calculator, specifying a route that is longer than 400 km returns a 400 RoutesValidationException error. Valid Values: [-180 to 180,-90 to 90] +- `"key"`: The optional API key to authorize the request. """ function calculate_route( CalculatorName, @@ -496,6 +497,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"TruckModeOptions"`: Specifies route preferences when traveling by Truck, such as avoiding routes that use ferries or tolls, and truck specifications to consider when choosing an optimal road. Requirements: TravelMode must be specified as Truck. +- `"key"`: The optional API key to authorize the request. 
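+
+# Example
+A hedged sketch of supplying the new key parameter through the optional params
+dictionary. The calculator name and coordinates are placeholders, and the departure
+and destination position arguments are assumptions rather than values documented here:
+
+```julia
+# Pass the API key alongside the required positional arguments.
+params = Dict{String,Any}("key" => "v1.public.example-api-key")
+resp = calculate_route_matrix(
+    "MyRouteCalculator",       # CalculatorName (placeholder)
+    [[-122.3321, 47.6062]],    # DeparturePositions (placeholder, [lon, lat])
+    [[-122.2015, 47.6101]],    # DestinationPositions (placeholder)
+    params,
+)
+```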
""" function calculate_route_matrix( CalculatorName, @@ -598,9 +600,8 @@ end create_key(key_name, restrictions, params::Dict{String,<:Any}) Creates an API key resource in your Amazon Web Services account, which lets you grant -geo:GetMap* actions for Amazon Location Map resources to the API key bearer. The API keys -feature is in preview. We may add, change, or remove features before announcing general -availability. For more information, see Using API keys. +actions for Amazon Location resources to the API key bearer. For more information, see +Using API keys. # Arguments - `key_name`: A custom name for the API key resource. Requirements: Contain only @@ -880,6 +881,9 @@ current and historical location of devices. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: An optional description for the tracker resource. +- `"EventBridgeEnabled"`: Whether to enable position UPDATE events from this tracker to be + sent to EventBridge. You do not need enable this feature to get ENTER and EXIT events for + geofences with this tracker. Those events are always sent to EventBridge. - `"KmsKeyId"`: A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN. - `"PositionFiltering"`: Specifies the position filtering for the tracker resource. Valid @@ -1171,9 +1175,7 @@ end describe_key(key_name) describe_key(key_name, params::Dict{String,<:Any}) -Retrieves the API key resource details. The API keys feature is in preview. We may add, -change, or remove features before announcing general availability. For more information, -see Using API keys. +Retrieves the API key resource details. # Arguments - `key_name`: The name of the API key resource. @@ -1702,6 +1704,7 @@ Region Data provider specified in the place index resource # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"key"`: The optional API key to authorize the request. - `"language"`: The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English. This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not @@ -1850,9 +1853,7 @@ end list_keys() list_keys(params::Dict{String,<:Any}) -Lists API key resources in your Amazon Web Services account. The API keys feature is in -preview. We may add, change, or remove features before announcing general availability. For -more information, see Using API keys. +Lists API key resources in your Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2101,6 +2102,11 @@ existing geofence if a geofence ID is included in the request. polygon or a circle. Including both will return a validation error. Each geofence polygon can have a maximum of 1,000 vertices. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"GeofenceProperties"`: Associates one of more properties with the geofence. A property + is a key-value pair stored with the geofence and added to any geofence event triggered with + that geofence. Format: \"key\" : \"value\" """ function put_geofence( CollectionName, GeofenceId, Geometry; aws_config::AbstractAWSConfig=global_aws_config() @@ -2158,6 +2164,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys not have a value for Greek, the result will be in a language that the provider does support. - `"MaxResults"`: An optional parameter. The maximum number of results returned per request. Default value: 50 +- `"key"`: The optional API key to authorize the request. """ function search_place_index_for_position( IndexName, Position; aws_config::AbstractAWSConfig=global_aws_config() @@ -2222,6 +2229,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542. FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error. +- `"FilterCategories"`: A list of one or more Amazon Location categories to filter the + returned places. If you include more than one category, the results will include results + that match any of the categories listed. For more information about using categories, + including a list of Amazon Location categories, see Categories and filtering, in the Amazon + Location Service Developer Guide. - `"FilterCountries"`: An optional parameter that limits the search results by returning only suggestions within the provided list of countries. Use the ISO 3166 3-digit country code. For example, Australia uses three upper-case characters: AUS. @@ -2236,6 +2248,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys will be in a language that the provider does support. - `"MaxResults"`: An optional parameter. The maximum number of results returned per request. The default: 5 +- `"key"`: The optional API key to authorize the request. """ function search_place_index_for_suggestions( IndexName, Text; aws_config::AbstractAWSConfig=global_aws_config() @@ -2297,6 +2310,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys corner has longitude -12.7935 and latitude -37.4835, and the northeast corner has longitude -12.0684 and latitude -36.9542. FilterBBox and BiasPosition are mutually exclusive. Specifying both options results in an error. +- `"FilterCategories"`: A list of one or more Amazon Location categories to filter the + returned places. If you include more than one category, the results will include results + that match any of the categories listed. For more information about using categories, + including a list of Amazon Location categories, see Categories and filtering, in the Amazon + Location Service Developer Guide. - `"FilterCountries"`: An optional parameter that limits the search results by returning only places that are in a specified list of countries. Valid values include ISO 3166 3-digit country codes. For example, Australia uses three upper-case characters: AUS. @@ -2311,6 +2329,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys will be in a language that the provider does support. - `"MaxResults"`: An optional parameter. The maximum number of results returned per request. The default: 50 +- `"key"`: The optional API key to authorize the request. """ function search_place_index_for_text( IndexName, Text; aws_config::AbstractAWSConfig=global_aws_config() @@ -2468,9 +2487,7 @@ end update_key(key_name) update_key(key_name, params::Dict{String,<:Any}) -Updates the specified properties of a given API key resource. The API keys feature is in -preview. We may add, change, or remove features before announcing general availability. For -more information, see Using API keys. 
+Updates the specified properties of a given API key resource. # Arguments - `key_name`: The name of the API key resource to update. @@ -2630,6 +2647,9 @@ Updates the specified properties of a given tracker resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Description"`: Updates the description for the tracker resource. +- `"EventBridgeEnabled"`: Whether to enable position UPDATE events from this tracker to be + sent to EventBridge. You do not need enable this feature to get ENTER and EXIT events for + geofences with this tracker. Those events are always sent to EventBridge. - `"PositionFiltering"`: Updates the position filtering for the tracker resource. Valid values: TimeBased - Location updates are evaluated against linked geofence collections, but not every location update is stored. If your update frequency is more often than 30 diff --git a/src/services/m2.jl b/src/services/m2.jl index 501a6daa35..4c5e62473f 100644 --- a/src/services/m2.jl +++ b/src/services/m2.jl @@ -62,7 +62,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys service also handles deleting the clientToken after it expires. - `"description"`: The description of the application. - `"kmsKeyId"`: The identifier of a customer managed key. -- `"roleArn"`: The Amazon Resource Name (ARN) of the role associated with the application. +- `"roleArn"`: The Amazon Resource Name (ARN) that identifies a role that the application + uses to access Amazon Web Services resources that are not part of the application or are in + a different Amazon Web Services account. - `"tags"`: A list of tags to apply to the application. """ function create_application( @@ -658,6 +660,30 @@ function get_environment( ) end +""" + get_signed_bluinsights_url() + get_signed_bluinsights_url(params::Dict{String,<:Any}) + +Gets a single sign-on URL that can be used to connect to AWS Blu Insights. + +""" +function get_signed_bluinsights_url(; aws_config::AbstractAWSConfig=global_aws_config()) + return m2( + "GET", "/signed-bi-url"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_signed_bluinsights_url( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return m2( + "GET", + "/signed-bi-url", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_application_versions(application_id) list_application_versions(application_id, params::Dict{String,<:Any}) @@ -1294,7 +1320,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Mainframe Modernization accepts the engineVersion parameter only if applyDuringMaintenanceWindow is true. If any parameter other than engineVersion is provided in UpdateEnvironmentRequest, it will fail if applyDuringMaintenanceWindow is set to true. -- `"desiredCapacity"`: The desired capacity for the runtime environment to update. +- `"desiredCapacity"`: The desired capacity for the runtime environment to update. The + minimum possible value is 0 and the maximum is 100. - `"engineVersion"`: The version of the runtime engine for the runtime environment. - `"instanceType"`: The instance type for the runtime environment to update. 
- `"preferredMaintenanceWindow"`: Configures the maintenance window you want for the diff --git a/src/services/macie2.jl b/src/services/macie2.jl index 2873752c14..575fbe384e 100644 --- a/src/services/macie2.jl +++ b/src/services/macie2.jl @@ -177,17 +177,24 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys specify for the job (managedDataIdentifierSelector). To retrieve a list of valid values for this property, use the ListManagedDataIdentifiers operation. - `"managedDataIdentifierSelector"`: The selection type to apply when determining which - managed data identifiers the job uses to analyze data. Valid values are: ALL - Use all the - managed data identifiers that Amazon Macie provides. If you specify this value, don't - specify any values for the managedDataIdentifierIds property. EXCLUDE - Use all the managed - data identifiers that Macie provides except the managed data identifiers specified by the - managedDataIdentifierIds property. INCLUDE - Use only the managed data identifiers - specified by the managedDataIdentifierIds property. NONE - Don't use any managed data - identifiers. If you specify this value, specify at least one custom data identifier for the - job (customDataIdentifierIds) and don't specify any values for the managedDataIdentifierIds - property. If you don't specify a value for this property, the job uses all managed data - identifiers. If you don't specify a value for this property or you specify ALL or EXCLUDE - for a recurring job, the job also uses new managed data identifiers as they are released. + managed data identifiers the job uses to analyze data. Valid values are: ALL (default) - + Use all managed data identifiers. If you specify this value, don't specify any values for + the managedDataIdentifierIds property. EXCLUDE - Use all managed data identifiers except + the ones specified by the managedDataIdentifierIds property. INCLUDE - Use only the managed + data identifiers specified by the managedDataIdentifierIds property. NONE - Don't use any + managed data identifiers. If you specify this value, specify at least one custom data + identifier for the job (customDataIdentifierIds) and don't specify any values for the + managedDataIdentifierIds property. RECOMMENDED - Use only the set of managed data + identifiers that Amazon Web Services recommends for jobs. If you specify this value, don't + specify any values for the managedDataIdentifierIds property. If you don't specify a value + for this property, the job uses all managed data identifiers. If the job is a recurring job + and you don't specify a value for this property or you specify ALL or EXCLUDE, each job run + automatically uses new managed data identifiers that are released. If you specify + RECOMMENDED for a recurring job, each job run automatically uses all the managed data + identifiers that are in the recommended set when the job starts to run. For information + about individual managed data identifiers or to determine which ones are in the recommended + set, see Using managed data identifiers and Recommended managed data identifiers in the + Amazon Macie User Guide. - `"samplingPercentage"`: The sampling depth, as a percentage, for the job to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at @@ -285,7 +292,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys matches the pattern and the keyword is within the specified distance, Amazon Macie includes the result. The distance can be 1-300 characters. The default value is 50. - `"severityLevels"`: The severity to assign to findings that the custom data identifier - produces, based on the number of occurrences of text that matches the custom data + produces, based on the number of occurrences of text that match the custom data identifier's detection criteria. You can specify as many as three SeverityLevel objects in this array, one for each severity: LOW, MEDIUM, or HIGH. If you specify more than one, the occurrences thresholds must be in ascending order by severity, moving from LOW to HIGH. For @@ -1253,7 +1260,7 @@ end get_finding_statistics(group_by) get_finding_statistics(group_by, params::Dict{String,<:Any}) - Retrieves (queries) aggregated statistical data about findings. +Retrieves (queries) aggregated statistical data about findings. # Arguments - `group_by`: The finding property to use to group the query results. Valid values are: diff --git a/src/services/managedblockchain_query.jl b/src/services/managedblockchain_query.jl new file mode 100644 index 0000000000..eec939f561 --- /dev/null +++ b/src/services/managedblockchain_query.jl @@ -0,0 +1,297 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: managedblockchain_query +using AWS.Compat +using AWS.UUIDs + +""" + batch_get_token_balance() + batch_get_token_balance(params::Dict{String,<:Any}) + +Gets the token balance for a batch of tokens by using the GetTokenBalance action for every +token in the request. Only the native tokens BTC,ETH, and the ERC-20, ERC-721, and ERC +1155 token standards are supported. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"getTokenBalanceInputs"`: An array of GetTokenBalanceInput objects whose balance is + being requested. +""" +function batch_get_token_balance(; aws_config::AbstractAWSConfig=global_aws_config()) + return managedblockchain_query( + "POST", + "/batch-get-token-balance"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_get_token_balance( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/batch-get-token-balance", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_token_balance(owner_identifier, token_identifier) + get_token_balance(owner_identifier, token_identifier, params::Dict{String,<:Any}) + +Gets the balance of a specific token, including native tokens, for a given address (wallet +or contract) on the blockchain. Only the native tokens BTC,ETH, and the ERC-20, ERC-721, +and ERC 1155 token standards are supported. + +# Arguments +- `owner_identifier`: The container for the identifier for the owner. +- `token_identifier`: The container for the identifier for the token, including the unique + token ID and its blockchain network. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"atBlockchainInstant"`: The time for when the TokenBalance is requested or the current + time if a time is not provided in the request. This time will only be recorded up to the + second. 
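+
+# Example
+A minimal sketch. The address and network strings are illustrative placeholders, and
+the field names inside the owner and token containers ("address", "network") are
+assumptions rather than values documented above:
+
+```julia
+owner = Dict("address" => "0x1111111111111111111111111111111111111111")
+token = Dict("network" => "ETHEREUM_MAINNET")
+resp = get_token_balance(owner, token)
+```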
+""" +function get_token_balance( + ownerIdentifier, tokenIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/get-token-balance", + Dict{String,Any}( + "ownerIdentifier" => ownerIdentifier, "tokenIdentifier" => tokenIdentifier + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_token_balance( + ownerIdentifier, + tokenIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/get-token-balance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ownerIdentifier" => ownerIdentifier, + "tokenIdentifier" => tokenIdentifier, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_transaction(network, transaction_hash) + get_transaction(network, transaction_hash, params::Dict{String,<:Any}) + +Get the details of a transaction. + +# Arguments +- `network`: The blockchain network where the transaction occurred. +- `transaction_hash`: The hash of the transaction. It is generated whenever a transaction + is verified and added to the blockchain. + +""" +function get_transaction( + network, transactionHash; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/get-transaction", + Dict{String,Any}("network" => network, "transactionHash" => transactionHash); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_transaction( + network, + transactionHash, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/get-transaction", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "network" => network, "transactionHash" => transactionHash + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_token_balances(token_filter) + list_token_balances(token_filter, params::Dict{String,<:Any}) + +This action returns the following for a given a blockchain network: Lists all token +balances owned by an address (either a contact address or a wallet address). Lists all +token balances for all tokens created by a contract. Lists all token balances for a given +token. You must always specify the network property of the tokenFilter when using this +operation. + +# Arguments +- `token_filter`: The contract address or a token identifier on the blockchain network by + which to filter the request. You must specify the contractAddress property of this + container when listing tokens minted by a contract. You must always specify the network + property of this container when using this operation. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of token balances to return. +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +- `"ownerFilter"`: The contract or wallet address on the blockchain network by which to + filter the request. You must specify the address property of the ownerFilter when listing + balances of tokens owned by the address. 
+""" +function list_token_balances(tokenFilter; aws_config::AbstractAWSConfig=global_aws_config()) + return managedblockchain_query( + "POST", + "/list-token-balances", + Dict{String,Any}("tokenFilter" => tokenFilter); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_token_balances( + tokenFilter, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/list-token-balances", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("tokenFilter" => tokenFilter), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_transaction_events(network, transaction_hash) + list_transaction_events(network, transaction_hash, params::Dict{String,<:Any}) + +An array of TransactionEvent objects. Each object contains details about the transaction +event. + +# Arguments +- `network`: The blockchain network where the transaction events occurred. +- `transaction_hash`: The hash of the transaction. It is generated whenever a transaction + is verified and added to the blockchain. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of transaction events to list. Even if additional + results can be retrieved, the request can return less results than maxResults or an empty + array of results. To retrieve the next set of results, make another request with the + returned nextToken value. The value of nextToken is null when there are no more results to + return +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +""" +function list_transaction_events( + network, transactionHash; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/list-transaction-events", + Dict{String,Any}("network" => network, "transactionHash" => transactionHash); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_transaction_events( + network, + transactionHash, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/list-transaction-events", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "network" => network, "transactionHash" => transactionHash + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_transactions(address, network) + list_transactions(address, network, params::Dict{String,<:Any}) + +Lists all of the transactions on a given wallet address or to a specific contract. + +# Arguments +- `address`: The address (either a contract or wallet), whose transactions are being + requested. +- `network`: The blockchain network where the transactions occurred. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"fromBlockchainInstant"`: +- `"maxResults"`: The maximum number of transactions to list. Even if additional results + can be retrieved, the request can return less results than maxResults or an empty array of + results. To retrieve the next set of results, make another request with the returned + nextToken value. The value of nextToken is null when there are no more results to return +- `"nextToken"`: The pagination token that indicates the next set of results to retrieve. +- `"sort"`: Sorts items in an ascending order if the first page starts at fromTime. 
Sorts + items in a descending order if the first page starts at toTime. +- `"toBlockchainInstant"`: +""" +function list_transactions( + address, network; aws_config::AbstractAWSConfig=global_aws_config() +) + return managedblockchain_query( + "POST", + "/list-transactions", + Dict{String,Any}("address" => address, "network" => network); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_transactions( + address, + network, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return managedblockchain_query( + "POST", + "/list-transactions", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("address" => address, "network" => network), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/medialive.jl b/src/services/medialive.jl index 13a7f6224b..404c5db95e 100644 --- a/src/services/medialive.jl +++ b/src/services/medialive.jl @@ -829,6 +829,33 @@ function delete_tags( ) end +""" + describe_account_configuration() + describe_account_configuration(params::Dict{String,<:Any}) + +Get account configuration + +""" +function describe_account_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "GET", + "/prod/accountConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_account_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/accountConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_channel(channel_id) describe_channel(channel_id, params::Dict{String,<:Any}) @@ -1172,6 +1199,53 @@ function describe_schedule( ) end +""" + describe_thumbnails(channel_id, pipeline_id, thumbnail_type) + describe_thumbnails(channel_id, pipeline_id, thumbnail_type, params::Dict{String,<:Any}) + +Describe the latest thumbnails data. + +# Arguments +- `channel_id`: Unique ID of the channel +- `pipeline_id`: Pipeline ID (\"0\" or \"1\") +- `thumbnail_type`: thumbnail type + +""" +function describe_thumbnails( + channelId, pipelineId, thumbnailType; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "GET", + "/prod/channels/$(channelId)/thumbnails", + Dict{String,Any}("pipelineId" => pipelineId, "thumbnailType" => thumbnailType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_thumbnails( + channelId, + pipelineId, + thumbnailType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medialive( + "GET", + "/prod/channels/$(channelId)/thumbnails", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "pipelineId" => pipelineId, "thumbnailType" => thumbnailType + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_channels() list_channels(params::Dict{String,<:Any}) @@ -1847,6 +1921,36 @@ function transfer_input_device( ) end +""" + update_account_configuration() + update_account_configuration(params::Dict{String,<:Any}) + +Update account configuration + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"accountConfiguration"`: +""" +function update_account_configuration(; aws_config::AbstractAWSConfig=global_aws_config()) + return medialive( + "PUT", + "/prod/accountConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_account_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medialive( + "PUT", + "/prod/accountConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_channel(channel_id) update_channel(channel_id, params::Dict{String,<:Any}) diff --git a/src/services/medical_imaging.jl b/src/services/medical_imaging.jl new file mode 100644 index 0000000000..ee1d708a02 --- /dev/null +++ b/src/services/medical_imaging.jl @@ -0,0 +1,742 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: medical_imaging +using AWS.Compat +using AWS.UUIDs + +""" + copy_image_set(copy_image_set_information, datastore_id, source_image_set_id) + copy_image_set(copy_image_set_information, datastore_id, source_image_set_id, params::Dict{String,<:Any}) + +Copy an image set. + +# Arguments +- `copy_image_set_information`: Copy image set information. +- `datastore_id`: The data store identifier. +- `source_image_set_id`: The source image set identifier. + +""" +function copy_image_set( + copyImageSetInformation, + datastoreId, + sourceImageSetId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(sourceImageSetId)/copyImageSet", + Dict{String,Any}("copyImageSetInformation" => copyImageSetInformation); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function copy_image_set( + copyImageSetInformation, + datastoreId, + sourceImageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(sourceImageSetId)/copyImageSet", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("copyImageSetInformation" => copyImageSetInformation), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_datastore(client_token) + create_datastore(client_token, params::Dict{String,<:Any}) + +Create a data store. + +# Arguments +- `client_token`: A unique identifier for API idempotency. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"datastoreName"`: The data store name. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) assigned to the AWS Key Management Service + (AWS KMS) key for accessing encrypted data. +- `"tags"`: The tags provided when creating a data store. +""" +function create_datastore(clientToken; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "POST", + "/datastore", + Dict{String,Any}("clientToken" => clientToken); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_datastore( + clientToken, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => clientToken), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_datastore(datastore_id) + delete_datastore(datastore_id, params::Dict{String,<:Any}) + +Delete a data store. 
Before a data store can be deleted, you must first delete all image +sets within it. + +# Arguments +- `datastore_id`: The data store identifier. + +""" +function delete_datastore(datastoreId; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "DELETE", + "/datastore/$(datastoreId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_datastore( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "DELETE", + "/datastore/$(datastoreId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_image_set(datastore_id, image_set_id) + delete_image_set(datastore_id, image_set_id, params::Dict{String,<:Any}) + +Delete an image set. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +""" +function delete_image_set( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/deleteImageSet"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_image_set( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/deleteImageSet", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_datastore(datastore_id) + get_datastore(datastore_id, params::Dict{String,<:Any}) + +Get data store properties. + +# Arguments +- `datastore_id`: The data store identifier. + +""" +function get_datastore(datastoreId; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "GET", + "/datastore/$(datastoreId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_datastore( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/datastore/$(datastoreId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_dicomimport_job(datastore_id, job_id) + get_dicomimport_job(datastore_id, job_id, params::Dict{String,<:Any}) + +Get the import job properties to learn more about the job or job progress. + +# Arguments +- `datastore_id`: The data store identifier. +- `job_id`: The import job identifier. + +""" +function get_dicomimport_job( + datastoreId, jobId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", + "/getDICOMImportJob/datastore/$(datastoreId)/job/$(jobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_dicomimport_job( + datastoreId, + jobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/getDICOMImportJob/datastore/$(datastoreId)/job/$(jobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_image_frame(datastore_id, image_frame_information, image_set_id) + get_image_frame(datastore_id, image_frame_information, image_set_id, params::Dict{String,<:Any}) + +Get an image frame (pixel data) for an image set. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_frame_information`: Information about the image frame (pixel data) identifier. +- `image_set_id`: The image set identifier. 
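+
+# Example
+A hedged sketch; the identifiers are placeholders, and the single imageFrameId key
+inside imageFrameInformation is an assumption about that structure:
+
+```julia
+frame_info = Dict("imageFrameId" => "0123456789abcdef0123456789abcdef")
+pixel_data = get_image_frame(
+    "datastore-id-placeholder",
+    frame_info,
+    "image-set-id-placeholder",
+)
+```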
+ +""" +function get_image_frame( + datastoreId, + imageFrameInformation, + imageSetId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageFrame", + Dict{String,Any}("imageFrameInformation" => imageFrameInformation); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_image_frame( + datastoreId, + imageFrameInformation, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageFrame", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("imageFrameInformation" => imageFrameInformation), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_image_set(datastore_id, image_set_id) + get_image_set(datastore_id, image_set_id, params::Dict{String,<:Any}) + +Get image set properties. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"version"`: The image set version identifier. +""" +function get_image_set( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSet"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_image_set( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSet", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_image_set_metadata(datastore_id, image_set_id) + get_image_set_metadata(datastore_id, image_set_id, params::Dict{String,<:Any}) + +Get metadata attributes for an image set. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"version"`: The image set version identifier. +""" +function get_image_set_metadata( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSetMetadata"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_image_set_metadata( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/getImageSetMetadata", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_datastores() + list_datastores(params::Dict{String,<:Any}) + +List data stores created by this AWS account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"datastoreStatus"`: The data store status. +- `"maxResults"`: Valid Range: Minimum value of 1. Maximum value of 50. +- `"nextToken"`: The pagination token used to request the list of data stores on the next + page. 
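+
+# Example
+A small sketch of listing data stores and narrowing the result with the optional
+parameters (the status value and page size are illustrative):
+
+```julia
+resp = list_datastores(Dict("datastoreStatus" => "ACTIVE", "maxResults" => 10))
+# Pass the returned nextToken in a follow-up call to fetch the next page.
+```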
+""" +function list_datastores(; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "GET", "/datastore"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_datastores( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", "/datastore", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_dicomimport_jobs(datastore_id) + list_dicomimport_jobs(datastore_id, params::Dict{String,<:Any}) + +List import jobs created by this AWS account for a specific data store. + +# Arguments +- `datastore_id`: The data store identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"jobStatus"`: The filters for listing import jobs based on status. +- `"maxResults"`: The max results count. The upper bound is determined by load testing. +- `"nextToken"`: The pagination token used to request the list of import jobs on the next + page. +""" +function list_dicomimport_jobs( + datastoreId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", + "/listDICOMImportJobs/datastore/$(datastoreId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_dicomimport_jobs( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/listDICOMImportJobs/datastore/$(datastoreId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_image_set_versions(datastore_id, image_set_id) + list_image_set_versions(datastore_id, image_set_id, params::Dict{String,<:Any}) + +List image set versions. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The max results count. +- `"nextToken"`: The pagination token used to request the list of image set versions on the + next page. +""" +function list_image_set_versions( + datastoreId, imageSetId; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/listImageSetVersions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_image_set_versions( + datastoreId, + imageSetId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/listImageSetVersions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists all tags associated with a medical imaging resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the medical imaging resource to list + tags for. 
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + search_image_sets(datastore_id) + search_image_sets(datastore_id, params::Dict{String,<:Any}) + +Search image sets based on defined input attributes. + +# Arguments +- `datastore_id`: The identifier of the data store where the image sets reside. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that can be returned in a search. +- `"nextToken"`: The token used for pagination of results returned in the response. Use the + token returned from the previous request to continue results where the previous request + ended. +- `"searchCriteria"`: The search criteria that filters by applying a maximum of 1 item to + SearchByAttribute. +""" +function search_image_sets(datastoreId; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/searchImageSets"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_image_sets( + datastoreId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/searchImageSets", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_dicomimport_job(client_token, data_access_role_arn, datastore_id, input_s3_uri, output_s3_uri) + start_dicomimport_job(client_token, data_access_role_arn, datastore_id, input_s3_uri, output_s3_uri, params::Dict{String,<:Any}) + +Start importing bulk data into an ACTIVE data store. The import job imports DICOM P10 files +found in the S3 prefix specified by the inputS3Uri parameter. The import job stores +processing results in the file specified by the outputS3Uri parameter. + +# Arguments +- `client_token`: A unique identifier for API idempotency. +- `data_access_role_arn`: The Amazon Resource Name (ARN) of the IAM role that grants + permission to access medical imaging resources. +- `datastore_id`: The data store identifier. +- `input_s3_uri`: The input prefix path for the S3 bucket that contains the DICOM files to + be imported. +- `output_s3_uri`: The output prefix of the S3 bucket to upload the results of the DICOM + import job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"jobName"`: The import job name. 
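+
+# Example
+A hedged sketch; the role ARN, bucket prefixes, and data store identifier are
+placeholders, and uuid4 (from the UUIDs module loaded by this file) supplies the
+idempotency token:
+
+```julia
+resp = start_dicomimport_job(
+    string(uuid4()),                                         # clientToken
+    "arn:aws:iam::111122223333:role/ImportRolePlaceholder",  # dataAccessRoleArn
+    "datastore-id-placeholder",                              # datastoreId
+    "s3://amzn-s3-demo-bucket/dicom-input/",                 # inputS3Uri
+    "s3://amzn-s3-demo-bucket/dicom-output/",                # outputS3Uri
+)
+```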
+""" +function start_dicomimport_job( + clientToken, + dataAccessRoleArn, + datastoreId, + inputS3Uri, + outputS3Uri; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/startDICOMImportJob/datastore/$(datastoreId)", + Dict{String,Any}( + "clientToken" => clientToken, + "dataAccessRoleArn" => dataAccessRoleArn, + "inputS3Uri" => inputS3Uri, + "outputS3Uri" => outputS3Uri, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_dicomimport_job( + clientToken, + dataAccessRoleArn, + datastoreId, + inputS3Uri, + outputS3Uri, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/startDICOMImportJob/datastore/$(datastoreId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clientToken" => clientToken, + "dataAccessRoleArn" => dataAccessRoleArn, + "inputS3Uri" => inputS3Uri, + "outputS3Uri" => outputS3Uri, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds a user-specifed key and value tag to a medical imaging resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the medical imaging resource that tags + are being added to. +- `tags`: The user-specified key and value tag pairs added to a medical imaging resource. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return medical_imaging( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags from a medical imaging resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the medical imaging resource that tags + are being removed from. +- `tag_keys`: The keys for the tags to be removed from the medical imaging resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return medical_imaging( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_image_set_metadata(datastore_id, image_set_id, latest_version, update_image_set_metadata_updates) + update_image_set_metadata(datastore_id, image_set_id, latest_version, update_image_set_metadata_updates, params::Dict{String,<:Any}) + +Update image set metadata attributes. + +# Arguments +- `datastore_id`: The data store identifier. +- `image_set_id`: The image set identifier. 
+- `latest_version`: The latest image set version identifier. +- `update_image_set_metadata_updates`: Update image set metadata updates. + +""" +function update_image_set_metadata( + datastoreId, + imageSetId, + latestVersion, + updateImageSetMetadataUpdates; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/updateImageSetMetadata", + Dict{String,Any}( + "latestVersion" => latestVersion, + "updateImageSetMetadataUpdates" => updateImageSetMetadataUpdates, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_image_set_metadata( + datastoreId, + imageSetId, + latestVersion, + updateImageSetMetadataUpdates, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return medical_imaging( + "POST", + "/datastore/$(datastoreId)/imageSet/$(imageSetId)/updateImageSetMetadata", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "latestVersion" => latestVersion, + "updateImageSetMetadataUpdates" => updateImageSetMetadataUpdates, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/mgn.jl b/src/services/mgn.jl index e8c9a0c63b..b64783429b 100644 --- a/src/services/mgn.jl +++ b/src/services/mgn.jl @@ -13,6 +13,9 @@ Archive application. # Arguments - `application_id`: Application ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function archive_application( applicationID; aws_config::AbstractAWSConfig=global_aws_config() @@ -50,6 +53,9 @@ Archive wave. # Arguments - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function archive_wave(waveID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -82,6 +88,9 @@ Associate applications to wave. - `application_ids`: Application IDs list. - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function associate_applications( applicationIDs, waveID; aws_config::AbstractAWSConfig=global_aws_config() @@ -125,6 +134,9 @@ Associate source servers to application. - `application_id`: Application ID. - `source_server_ids`: Source server IDs list. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function associate_source_servers( applicationID, sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config() @@ -175,6 +187,9 @@ if the Source Server is already launchable (dataReplicationInfo.lagDuration is n - `source_server_id`: The request to change the source server migration lifecycle state by source server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: The request to change the source server migration account ID. """ function change_server_life_cycle_state( lifeCycle, sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -221,6 +236,7 @@ Create application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Application description. - `"tags"`: Application tags. 
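+
+# Example
+A hedged sketch of creating an application in a managed account. The application name
+positional argument is an assumption about this operation's signature, and the name,
+account ID, and description are placeholders:
+
+```julia
+resp = create_application(
+    "core-banking-app",
+    Dict("accountID" => "111122223333", "description" => "Placeholder description"),
+)
+```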
""" @@ -325,6 +341,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ebsEncryptionKeyArn"`: Request to configure an EBS encryption key during Replication Settings template creation. - `"tags"`: Request to configure tags during Replication Settings template creation. +- `"useFipsEndpoint"`: Request to use Fips Endpoint during Replication Settings template + creation. """ function create_replication_configuration_template( associateDefaultSecurityGroup, @@ -414,6 +432,7 @@ Create wave. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Wave description. - `"tags"`: Wave tags. """ @@ -447,6 +466,9 @@ Delete application. # Arguments - `application_id`: Application ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function delete_application( applicationID; aws_config::AbstractAWSConfig=global_aws_config() @@ -484,6 +506,9 @@ Deletes a single Job by ID. # Arguments - `job_id`: Request to delete Job from service by Job ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to delete Job from service by Account ID. """ function delete_job(jobID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -605,6 +630,9 @@ Deletes a single source server by ID. # Arguments - `source_server_id`: Request to delete Source Server from service by Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to delete Source Server from service by Account ID. """ function delete_source_server( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -681,6 +709,9 @@ Delete wave. # Arguments - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function delete_wave(waveID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -714,6 +745,7 @@ Retrieves detailed job log items with paging. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to describe Job log Account ID. - `"maxResults"`: Request to describe Job log item maximum results. - `"nextToken"`: Request to describe Job log next token. """ @@ -750,6 +782,7 @@ available only to *Support* and only used in response to relevant support ticket # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to describe job log items by Account ID. - `"filters"`: Request to describe Job log filters. - `"maxResults"`: Request to describe job log items by max results. - `"nextToken"`: Request to describe job log items by next token. @@ -849,6 +882,7 @@ Retrieves all SourceServers or multiple SourceServers by ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to filter Source Servers list by Accoun ID. - `"filters"`: Request to filter Source Servers list. - `"maxResults"`: Request to filter Source Servers list by maximum results. - `"nextToken"`: Request to filter Source Servers list by next token. @@ -914,6 +948,9 @@ Disassociate applications from wave. 
- `application_ids`: Application IDs list. - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function disassociate_applications( applicationIDs, waveID; aws_config::AbstractAWSConfig=global_aws_config() @@ -957,6 +994,9 @@ Disassociate source servers from application. - `application_id`: Application ID. - `source_server_ids`: Source server IDs list. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function disassociate_source_servers( applicationID, sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config() @@ -1012,6 +1052,9 @@ dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be null # Arguments - `source_server_id`: Request to disconnect Source Server from service by Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to disconnect Source Server from service by Account ID. """ function disconnect_from_service( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1057,6 +1100,9 @@ dataReplicationInfo.lagDuration and dataReplicationInfo.lagDuration will be null # Arguments - `source_server_id`: Request to finalize Cutover by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to finalize Cutover by Source Account ID. """ function finalize_cutover(sourceServerID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -1092,6 +1138,9 @@ Lists all LaunchConfigurations available, filtered by Source Server IDs. # Arguments - `source_server_id`: Request to get Launch Configuration information by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to get Launch Configuration information by Account ID. """ function get_launch_configuration( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1129,6 +1178,9 @@ Lists all ReplicationConfigurations, filtered by Source Server ID. # Arguments - `source_server_id`: Request to get Replication Configuration by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request to get Replication Configuration by Account ID. """ function get_replication_configuration( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1189,6 +1241,7 @@ Retrieves all applications or multiple applications by ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Applications list Account ID. - `"filters"`: Applications list filters. - `"maxResults"`: Maximum results to return when listing applications. - `"nextToken"`: Request next token. @@ -1346,6 +1399,37 @@ function list_imports( ) end +""" + list_managed_accounts() + list_managed_accounts(params::Dict{String,<:Any}) + +List Managed Accounts. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: List managed accounts request max results. +- `"nextToken"`: List managed accounts request next token. 
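# Example

A minimal paging sketch (hypothetical values), assuming the package's `@service` macro and the default parsed-`Dict` response:

```julia
using AWS
@service MGN

# First page, capped at 50 results, using the maxResults/nextToken keys above.
page = MGN.list_managed_accounts(Dict("maxResults" => 50))

# Fetch the next page if a continuation token was returned; the "nextToken"
# response field name mirrors the request key and is an assumption here.
if haskey(page, "nextToken")
    page = MGN.list_managed_accounts(
        Dict("maxResults" => 50, "nextToken" => page["nextToken"])
    )
end
```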
+""" +function list_managed_accounts(; aws_config::AbstractAWSConfig=global_aws_config()) + return mgn( + "POST", + "/ListManagedAccounts"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_managed_accounts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/ListManagedAccounts", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_source_server_actions(source_server_id) list_source_server_actions(source_server_id, params::Dict{String,<:Any}) @@ -1357,6 +1441,8 @@ List source server post migration custom actions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID to return when listing source server post migration custom + actions. - `"filters"`: Filters to apply when listing source server post migration custom actions. - `"maxResults"`: Maximum amount of items to return when listing source server post migration custom actions. @@ -1480,6 +1566,7 @@ Retrieves all waves or multiple waves by ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Request account ID. - `"filters"`: Waves list filters. - `"maxResults"`: Maximum results to return when listing waves. - `"nextToken"`: Request next token. @@ -1506,6 +1593,9 @@ lifecycle. state which equals DISCONNECTED or CUTOVER. # Arguments - `source_server_id`: Mark as archived by Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Mark as archived by Account ID. """ function mark_as_archived(sourceServerID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -1532,6 +1622,46 @@ function mark_as_archived( ) end +""" + pause_replication(source_server_id) + pause_replication(source_server_id, params::Dict{String,<:Any}) + +Pause Replication. + +# Arguments +- `source_server_id`: Pause Replication Request source server ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Pause Replication Request account ID. +""" +function pause_replication( + sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/PauseReplication", + Dict{String,Any}("sourceServerID" => sourceServerID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function pause_replication( + sourceServerID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/PauseReplication", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceServerID" => sourceServerID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_source_server_action(action_id, action_name, document_identifier, order, source_server_id) put_source_server_action(action_id, action_name, document_identifier, order, source_server_id, params::Dict{String,<:Any}) @@ -1547,6 +1677,7 @@ Put source server post migration custom action. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Source server post migration custom account ID. - `"active"`: Source server post migration custom action active status. - `"category"`: Source server post migration custom action category. 
- `"description"`: Source server post migration custom action description. @@ -1697,6 +1828,9 @@ Remove source server post migration custom action. - `action_id`: Source server post migration custom action ID to remove. - `source_server_id`: Source server ID of the post migration custom action to remove. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Source server post migration account ID. """ function remove_source_server_action( actionID, sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1784,6 +1918,46 @@ function remove_template_action( ) end +""" + resume_replication(source_server_id) + resume_replication(source_server_id, params::Dict{String,<:Any}) + +Resume Replication. + +# Arguments +- `source_server_id`: Resume Replication Request source server ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Resume Replication Request account ID. +""" +function resume_replication( + sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() +) + return mgn( + "POST", + "/ResumeReplication", + Dict{String,Any}("sourceServerID" => sourceServerID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function resume_replication( + sourceServerID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/ResumeReplication", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceServerID" => sourceServerID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ retry_data_replication(source_server_id) retry_data_replication(source_server_id, params::Dict{String,<:Any}) @@ -1796,6 +1970,9 @@ state. # Arguments - `source_server_id`: Retry data replication for Source Server ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Retry data replication for Account ID. """ function retry_data_replication( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -1837,6 +2014,7 @@ property to CUTTING_OVER. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Start Cutover by Account IDs - `"tags"`: Start Cutover by Tags. """ function start_cutover(sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1963,6 +2141,9 @@ Starts replication for SNAPSHOT_SHIPPING agents. # Arguments - `source_server_id`: ID of source server on which to start replication. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID on which to start replication. """ function start_replication( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2004,6 +2185,7 @@ property to TESTING. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Start Test for Account ID. - `"tags"`: Start Test by Tags. """ function start_test(sourceServerIDs; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2033,6 +2215,44 @@ function start_test( ) end +""" + stop_replication(source_server_id) + stop_replication(source_server_id, params::Dict{String,<:Any}) + +Stop Replication. + +# Arguments +- `source_server_id`: Stop Replication Request source server ID. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Stop Replication Request account ID. +""" +function stop_replication(sourceServerID; aws_config::AbstractAWSConfig=global_aws_config()) + return mgn( + "POST", + "/StopReplication", + Dict{String,Any}("sourceServerID" => sourceServerID); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_replication( + sourceServerID, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mgn( + "POST", + "/StopReplication", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceServerID" => sourceServerID), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -2084,6 +2304,7 @@ CUTOVER. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Terminate Target instance by Account ID - `"tags"`: Terminate Target instance by Tags. """ function terminate_target_instances( @@ -2124,6 +2345,9 @@ Unarchive application. # Arguments - `application_id`: Application ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function unarchive_application( applicationID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2161,6 +2385,9 @@ Unarchive wave. # Arguments - `wave_id`: Wave ID. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. """ function unarchive_wave(waveID; aws_config::AbstractAWSConfig=global_aws_config()) return mgn( @@ -2232,6 +2459,7 @@ Update application. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Application description. - `"name"`: Application name. """ @@ -2273,6 +2501,7 @@ Updates multiple LaunchConfigurations by Source Server ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Update Launch configuration Account ID. - `"bootMode"`: Update Launch configuration boot mode request. - `"copyPrivateIp"`: Update Launch configuration copy Private IP request. - `"copyTags"`: Update Launch configuration copy Tags request. @@ -2381,6 +2610,7 @@ Allows you to update multiple ReplicationConfigurations by Source Server ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Update replication configuration Account ID request. - `"associateDefaultSecurityGroup"`: Update replication configuration associate default Application Migration Service Security group request. - `"bandwidthThrottling"`: Update replication configuration bandwidth throttling request. @@ -2400,6 +2630,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"stagingAreaTags"`: Update replication configuration Staging Area Tags request. - `"useDedicatedReplicationServer"`: Update replication configuration use dedicated Replication Server request. +- `"useFipsEndpoint"`: Update replication configuration use Fips Endpoint. 
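# Example

A minimal sketch of passing the new optional keys through the `params` overload (hypothetical IDs), assuming the package's `@service` macro:

```julia
using AWS
@service MGN

# Hypothetical source server and account IDs; enables the FIPS endpoint and
# targets a managed account via the optional keys documented above.
MGN.update_replication_configuration(
    "s-1234567890abcdef0",
    Dict("useFipsEndpoint" => true, "accountID" => "111122223333"),
)
```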
""" function update_replication_configuration( sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2462,6 +2693,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"stagingAreaTags"`: Update replication configuration template Staging Area Tags request. - `"useDedicatedReplicationServer"`: Update replication configuration template use dedicated Replication Server request. +- `"useFipsEndpoint"`: Update replication configuration template use Fips Endpoint request. """ function update_replication_configuration_template( replicationConfigurationTemplateID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2510,6 +2742,9 @@ replication type. - `replication_type`: Replication type to which to update source server. - `source_server_id`: ID of source server on which to update replication type. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID on which to update replication type. """ function update_source_server_replication_type( replicationType, sourceServerID; aws_config::AbstractAWSConfig=global_aws_config() @@ -2558,6 +2793,7 @@ Update wave. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountID"`: Account ID. - `"description"`: Wave description. - `"name"`: Wave name. """ diff --git a/src/services/mq.jl b/src/services/mq.jl index 810818bae8..69f6b4e24a 100644 --- a/src/services/mq.jl +++ b/src/services/mq.jl @@ -17,17 +17,21 @@ broker instance. ec2:DeleteNetworkInterface ec2:DeleteNetworkInterfacePermission ec2:DetachNetworkInterface ec2:DescribeInternetGateways ec2:DescribeNetworkInterfaces ec2:DescribeNetworkInterfacePermissions ec2:DescribeRouteTables ec2:DescribeSecurityGroups ec2:DescribeSubnets ec2:DescribeVpcs For more information, see Create an IAM User and Get -Your AWS Credentials and Never Modify or Delete the Amazon MQ Elastic Network Interface in -the Amazon MQ Developer Guide. +Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ Elastic +Network Interface in the Amazon MQ Developer Guide. # Arguments - `auto_minor_version_upgrade`: Enables automatic upgrades to new minor versions for brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot. Set to true by default, if no value is specified. -- `broker_name`: Required. The broker's name. This value must be unique in your AWS - account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, - and must not contain white spaces, brackets, wildcard characters, or special characters. +- `broker_name`: Required. The broker's name. This value must be unique in your Amazon Web + Services account, 1-50 characters long, must contain only letters, numbers, dashes, and + underscores, and must not contain white spaces, brackets, wildcard characters, or special + characters. Do not add personally identifiable information (PII) or other confidential or + sensitive information in broker names. Broker names are accessible to other Amazon Web + Services services, including CloudWatch Logs. Broker names are not intended to be used for + private or sensitive data. - `deployment_mode`: Required. The broker's deployment mode. - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. 
@@ -36,13 +40,10 @@ the Amazon MQ Developer Guide. - `host_instance_type`: Required. The broker's instance type. - `publicly_accessible`: Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided. -- `users`: Required. The list of broker users (persons or applications) who can access - queues and topics. This value can contain only alphanumeric characters, dashes, periods, - underscores, and tildes (- . _ ~). This value must be 2-100 characters long. Amazon MQ for - RabbitMQ When you create an Amazon MQ for RabbitMQ broker, one and only one administrative - user is accepted and created when a broker is first provisioned. All subsequent broker - users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web - console. +- `users`: The list of broker users (persons or applications) who can access queues and + topics. For Amazon MQ for RabbitMQ brokers, one and only one administrative user is + accepted and created when a broker is first provisioned. All subsequent broker users are + created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -50,11 +51,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys broker. The default is SIMPLE. - `"configuration"`: A list of information about the configuration. - `"creatorRequestId"`: The unique ID that the requester receives for the created broker. - Amazon MQ passes your ID with the API action. Note: We recommend using a Universally Unique + Amazon MQ passes your ID with the API action. We recommend using a Universally Unique Identifier (UUID) for the creatorRequestId. You may omit the creatorRequestId if your application doesn't require idempotency. -- `"encryptionOptions"`: Encryption options for the broker. Does not apply to RabbitMQ - brokers. +- `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. +- `"dataReplicationPrimaryBrokerArn"`: The Amazon Resource Name (ARN) of the primary broker + that is used to replicate data from in a data replication pair, and is applied to the + replica broker. Must be set when dataReplicationMode is set to CRDR. +- `"encryptionOptions"`: Encryption options for the broker. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers. - `"logs"`: Enables Amazon CloudWatch logging for brokers. @@ -71,8 +75,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no subnet requirements when deployed with public accessibility. Deployment without public accessibility requires at least one subnet. If you specify subnets in a shared VPC for a RabbitMQ broker, the associated VPC to - which the specified subnets belong must be owned by your AWS account. Amazon MQ will not be - able to create VPC endpoints in VPCs that are not owned by your AWS account. + which the specified subnets belong must be owned by your Amazon Web Services account. + Amazon MQ will not be able to create VPC endpoints in VPCs that are not owned by your + Amazon Web Services account. - `"tags"`: Create tags when creating the broker. 
""" function create_broker( @@ -241,7 +246,10 @@ end create_user(broker-id, password, username) create_user(broker-id, password, username, params::Dict{String,<:Any}) -Creates an ActiveMQ user. +Creates an ActiveMQ user. Do not add personally identifiable information (PII) or other +confidential or sensitive information in broker usernames. Broker usernames are accessible +to other Amazon Web Services services, including CloudWatch Logs. Broker usernames are not +intended to be used for private or sensitive data. # Arguments - `broker-id`: The unique ID that Amazon MQ generates for the broker. @@ -258,6 +266,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"groups"`: The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long. +- `"replicationUser"`: Defines if this user is intended for CRDR replication purposes. """ function create_user( broker_id, password, username; aws_config::AbstractAWSConfig=global_aws_config() @@ -771,6 +780,42 @@ function list_users( ) end +""" + promote(broker-id, mode) + promote(broker-id, mode, params::Dict{String,<:Any}) + +Promotes a data replication replica broker to the primary broker role. + +# Arguments +- `broker-id`: The unique ID that Amazon MQ generates for the broker. +- `mode`: The Promote mode requested. Note: Valid values for the parameter are SWITCHOVER, + FAILOVER. + +""" +function promote(broker_id, mode; aws_config::AbstractAWSConfig=global_aws_config()) + return mq( + "POST", + "/v1/brokers/$(broker-id)/promote", + Dict{String,Any}("mode" => mode); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function promote( + broker_id, + mode, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return mq( + "POST", + "/v1/brokers/$(broker-id)/promote", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("mode" => mode), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ reboot_broker(broker-id) reboot_broker(broker-id, params::Dict{String,<:Any}) @@ -820,6 +865,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur during the scheduled maintenance window of the broker or after a manual broker reboot. - `"configuration"`: A list of information about the configuration. +- `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. - `"engineVersion"`: The broker engine version. For a list of supported engine versions, see Supported engines. - `"hostInstanceType"`: The broker's host instance type to upgrade to. For a list of @@ -861,7 +907,8 @@ Updates the specified configuration. # Arguments - `configuration-id`: The unique ID that Amazon MQ generates for the configuration. -- `data`: Required. The base64-encoded XML configuration. +- `data`: Amazon MQ for Active MQ: The base64-encoded XML configuration. Amazon MQ for + RabbitMQ: the base64-encoded Cuttlefish configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -914,6 +961,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"password"`: The password of the user. 
This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=). +- `"replicationUser"`: Defines whether the user is intended for data replication. """ function update_user(broker_id, username; aws_config::AbstractAWSConfig=global_aws_config()) return mq( diff --git a/src/services/personalize.jl b/src/services/personalize.jl index 44fa7f4830..a9cc60489b 100644 --- a/src/services/personalize.jl +++ b/src/services/personalize.jl @@ -874,7 +874,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"performHPO"`: Whether to perform hyperparameter optimization (HPO) on the specified or selected recipe. The default is false. When performing AutoML, this parameter is always true and you should not set it to false. -- `"recipeArn"`: The ARN of the recipe to use for model training. Only specified when +- `"recipeArn"`: The ARN of the recipe to use for model training. This is required when performAutoML is false. - `"solutionConfig"`: The configuration to use with the solution. When performAutoML is set to true, Amazon Personalize only evaluates the autoMLConfig section of the solution @@ -2730,6 +2730,48 @@ function update_campaign( ) end +""" + update_dataset(dataset_arn, schema_arn) + update_dataset(dataset_arn, schema_arn, params::Dict{String,<:Any}) + +Update a dataset to replace its schema with a new or existing one. For more information, +see Replacing a dataset's schema. + +# Arguments +- `dataset_arn`: The Amazon Resource Name (ARN) of the dataset that you want to update. +- `schema_arn`: The Amazon Resource Name (ARN) of the new schema you want use. + +""" +function update_dataset( + datasetArn, schemaArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return personalize( + "UpdateDataset", + Dict{String,Any}("datasetArn" => datasetArn, "schemaArn" => schemaArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_dataset( + datasetArn, + schemaArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize( + "UpdateDataset", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("datasetArn" => datasetArn, "schemaArn" => schemaArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_metric_attribution() update_metric_attribution(params::Dict{String,<:Any}) diff --git a/src/services/privatenetworks.jl b/src/services/privatenetworks.jl index 6120179103..0179387eef 100644 --- a/src/services/privatenetworks.jl +++ b/src/services/privatenetworks.jl @@ -100,6 +100,14 @@ Activates the specified network site. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"clientToken"`: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency. +- `"commitmentConfiguration"`: Determines the duration and renewal status of the commitment + period for all pending radio units. If you include commitmentConfiguration in the + ActivateNetworkSiteRequest action, you must specify the following: The commitment period + for the radio unit. You can choose a 60-day, 1-year, or 3-year period. Whether you want + your commitment period to automatically renew for one more year after your current + commitment period expires. For pricing, see Amazon Web Services Private 5G Pricing. 
If + you do not include commitmentConfiguration in the ActivateNetworkSiteRequest action, the + commitment period is set to 60-days. """ function activate_network_site( networkSiteArn, shippingAddress; aws_config::AbstractAWSConfig=global_aws_config() @@ -855,21 +863,38 @@ end start_network_resource_update(network_resource_arn, update_type) start_network_resource_update(network_resource_arn, update_type, params::Dict{String,<:Any}) -Starts an update of the specified network resource. After you submit a request to replace -or return a network resource, the status of the network resource is -CREATING_SHIPPING_LABEL. The shipping label is available when the status of the network -resource is PENDING_RETURN. After the network resource is successfully returned, its status -is DELETED. For more information, see Return a radio unit. +Use this action to do the following tasks: Update the duration and renewal status of the +commitment period for a radio unit. The update goes into effect immediately. Request a +replacement for a network resource. Request that you return a network resource. After +you submit a request to replace or return a network resource, the status of the network +resource changes to CREATING_SHIPPING_LABEL. The shipping label is available when the +status of the network resource is PENDING_RETURN. After the network resource is +successfully returned, its status changes to DELETED. For more information, see Return a +radio unit. # Arguments - `network_resource_arn`: The Amazon Resource Name (ARN) of the network resource. - `update_type`: The update type. REPLACE - Submits a request to replace a defective radio unit. We provide a shipping label that you can use for the return process and we ship - a replacement radio unit to you. RETURN - Submits a request to replace a radio unit that + a replacement radio unit to you. RETURN - Submits a request to return a radio unit that you no longer need. We provide a shipping label that you can use for the return process. + COMMITMENT - Submits a request to change or renew the commitment period. If you choose this + value, then you must set commitmentConfiguration . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"commitmentConfiguration"`: Use this action to extend and automatically renew the + commitment period for the radio unit. You can do the following: Change a 60-day + commitment to a 1-year or 3-year commitment. The change is immediate and the hourly rate + decreases to the rate for the new commitment period. Change a 1-year commitment to a + 3-year commitment. The change is immediate and the hourly rate decreases to the rate for + the 3-year commitment period. Set a 1-year commitment to automatically renew for an + additional 1 year. The hourly rate for the additional year will continue to be the same as + your existing 1-year rate. Set a 3-year commitment to automatically renew for an + additional 1 year. The hourly rate for the additional year will continue to be the same as + your existing 3-year rate. Turn off a previously-enabled automatic renewal on a 1-year or + 3-year commitment. You cannot use the automatic-renewal option for a 60-day commitment. + For pricing, see Amazon Web Services Private 5G Pricing. - `"returnReason"`: The reason for the return. Providing a reason for a return is optional. - `"shippingAddress"`: The shipping address. 
If you don't provide a shipping address when replacing or returning a network resource, we use the address from the original order for diff --git a/src/services/proton.jl b/src/services/proton.jl index 436d3bbe4f..6fe5474b4c 100644 --- a/src/services/proton.jl +++ b/src/services/proton.jl @@ -1074,6 +1074,35 @@ function delete_component( ) end +""" + delete_deployment(id) + delete_deployment(id, params::Dict{String,<:Any}) + +Delete the deployment. + +# Arguments +- `id`: The ID of the deployment to delete. + +""" +function delete_deployment(id; aws_config::AbstractAWSConfig=global_aws_config()) + return proton( + "DeleteDeployment", + Dict{String,Any}("id" => id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_deployment( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return proton( + "DeleteDeployment", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_environment(name) delete_environment(name, params::Dict{String,<:Any}) @@ -1522,6 +1551,42 @@ function get_component( ) end +""" + get_deployment(id) + get_deployment(id, params::Dict{String,<:Any}) + +Get detailed data for a deployment. + +# Arguments +- `id`: The ID of the deployment that you want to get the detailed data for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"componentName"`: The name of a component that you want to get the detailed data for. +- `"environmentName"`: The name of a environment that you want to get the detailed data for. +- `"serviceInstanceName"`: The name of the service instance associated with the given + deployment ID. serviceName must be specified to identify the service instance. +- `"serviceName"`: The name of the service associated with the given deployment ID. +""" +function get_deployment(id; aws_config::AbstractAWSConfig=global_aws_config()) + return proton( + "GetDeployment", + Dict{String,Any}("id" => id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_deployment( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return proton( + "GetDeployment", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_environment(name) get_environment(name, params::Dict{String,<:Any}) @@ -2192,6 +2257,7 @@ components, see Proton components in the Proton User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment whose outputs you want. - `"nextToken"`: A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested. """ @@ -2292,6 +2358,38 @@ function list_components( ) end +""" + list_deployments() + list_deployments(params::Dict{String,<:Any}) + +List deployments. You can filter the result list by environment, service, or a single +service instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"componentName"`: The name of a component for result list filtering. Proton returns + deployments associated with that component. +- `"environmentName"`: The name of an environment for result list filtering. 
Proton returns + deployments associated with the environment. +- `"maxResults"`: The maximum number of deployments to list. +- `"nextToken"`: A token that indicates the location of the next deployment in the array of + deployment, after the list of deployment that was previously requested. +- `"serviceInstanceName"`: The name of a service instance for result list filtering. Proton + returns the deployments associated with the service instance. +- `"serviceName"`: The name of a service for result list filtering. Proton returns + deployments associated with service instances of the service. +""" +function list_deployments(; aws_config::AbstractAWSConfig=global_aws_config()) + return proton("ListDeployments"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_deployments( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return proton( + "ListDeployments", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_environment_account_connections(requested_by) list_environment_account_connections(requested_by, params::Dict{String,<:Any}) @@ -2348,6 +2446,7 @@ List the infrastructure as code outputs for your environment. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment whose outputs you want. - `"nextToken"`: A token that indicates the location of the next environment output in the array of environment outputs, after the list of environment outputs that was previously requested. @@ -2617,6 +2716,7 @@ Get a list service of instance Infrastructure as Code (IaC) outputs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment whose outputs you want. - `"nextToken"`: A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested. """ @@ -2754,6 +2854,7 @@ Get a list of service pipeline Infrastructure as Code (IaC) outputs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deploymentId"`: The ID of the deployment you want the outputs for. - `"nextToken"`: A token that indicates the location of the next output in the array of outputs, after the list of outputs that was previously requested. """ diff --git a/src/services/quicksight.jl b/src/services/quicksight.jl index 1cc0d8f462..ea2d4a650b 100644 --- a/src/services/quicksight.jl +++ b/src/services/quicksight.jl @@ -2774,6 +2774,97 @@ function describe_dashboard_permissions( ) end +""" + describe_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_job_id) + describe_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_job_id, params::Dict{String,<:Any}) + +Describes an existing snapshot job. Poll job descriptions after a job starts to know the +status of the job. For information on available status codes, see JobStatus. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that the dashboard snapshot + job is executed in. +- `dashboard_id`: The ID of the dashboard that you have started a snapshot job for. +- `snapshot_job_id`: The ID of the job to be described. The job ID is set when you start a + new job with a StartDashboardSnapshotJob API call. 
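# Example

A minimal polling sketch (hypothetical IDs), assuming the package's `@service` macro; the `"JobStatus"` response field name is an assumption based on the description above:

```julia
using AWS
@service QuickSight

# Hypothetical account, dashboard, and snapshot job IDs.
job = QuickSight.describe_dashboard_snapshot_job(
    "111122223333", "my-dashboard-id", "my-snapshot-job-id"
)

# Poll until the job reaches a terminal state; the exact response field name
# is assumed from the JobStatus reference mentioned above.
job["JobStatus"]
```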
+ +""" +function describe_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotJobId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_dashboard_snapshot_job_result(aws_account_id, dashboard_id, snapshot_job_id) + describe_dashboard_snapshot_job_result(aws_account_id, dashboard_id, snapshot_job_id, params::Dict{String,<:Any}) + +Describes the result of an existing snapshot job that has finished running. A finished +snapshot job will return a COMPLETED or FAILED status when you poll the job with a +DescribeDashboardSnapshotJob API call. If the job has not finished running, this operation +returns a message that says Dashboard Snapshot Job with id <SnapshotjobId> has not +reached a terminal state.. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that the dashboard snapshot + job is executed in. +- `dashboard_id`: The ID of the dashboard that you have started a snapshot job for. +- `snapshot_job_id`: The ID of the job to be described. The job ID is set when you start a + new job with a StartDashboardSnapshotJob API call. + +""" +function describe_dashboard_snapshot_job_result( + AwsAccountId, + DashboardId, + SnapshotJobId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)/result"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_dashboard_snapshot_job_result( + AwsAccountId, + DashboardId, + SnapshotJobId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs/$(SnapshotJobId)/result", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_data_set(aws_account_id, data_set_id) describe_data_set(aws_account_id, data_set_id, params::Dict{String,<:Any}) @@ -5792,7 +5883,7 @@ QuickSight assets. You can also choose to export any asset dependencies in the s Export jobs run asynchronously and can be polled with a DescribeAssetBundleExportJob API call. When a job is successfully completed, a download URL that contains the exported assets is returned. The URL is valid for 5 minutes and can be refreshed with a -DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 10 +DescribeAssetBundleExportJob API call. Each Amazon QuickSight account can run up to 5 export jobs concurrently. The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported. @@ -5873,7 +5964,7 @@ Starts an Asset Bundle import job. An Asset Bundle import job imports specified QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. 
The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon -QuickSight account. Each Amazon QuickSight account can run up to 10 import jobs +QuickSight account. Each Amazon QuickSight account can run up to 5 import jobs concurrently. The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported. @@ -5882,7 +5973,7 @@ the bundle file before the resources can be imported. - `asset_bundle_import_job_id`: The ID of the job. This ID is unique while the job is running. After the job is completed, you can reuse this ID for another job. - `asset_bundle_import_source`: The source of the asset bundle zip file that contains the - data that you want to import. + data that you want to import. The file must be in QUICKSIGHT_JSON format. - `aws_account_id`: The ID of the Amazon Web Services account to import assets into. # Optional Parameters @@ -5936,6 +6027,78 @@ function start_asset_bundle_import_job( ) end +""" + start_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_configuration, snapshot_job_id, user_configuration) + start_dashboard_snapshot_job(aws_account_id, dashboard_id, snapshot_configuration, snapshot_job_id, user_configuration, params::Dict{String,<:Any}) + +Starts an asynchronous job that generates a dashboard snapshot. You can request up to one +paginated PDF and up to five CSVs per API call. Poll job descriptions with a +DescribeDashboardSnapshotJob API call. Once the job succeeds, use the +DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that the dashboard snapshot + job is executed in. +- `dashboard_id`: The ID of the dashboard that you want to start a snapshot job for. +- `snapshot_configuration`: A structure that describes the configuration of the dashboard + snapshot. +- `snapshot_job_id`: An ID for the dashboard snapshot job. This ID is unique to the + dashboard while the job is running. This ID can be used to poll the status of a job with a + DescribeDashboardSnapshotJob while the job runs. You can reuse this ID for another job 24 + hours after the current job is completed. +- `user_configuration`: A structure that contains information about the anonymous users + that the generated snapshot is for. This API will not return information about registered + Amazon QuickSight. 
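# Example

A minimal sketch (hypothetical IDs), assuming the package's `@service` macro; the nested keys inside the two configuration structures are illustrative assumptions rather than the authoritative request shape:

```julia
using AWS
@service QuickSight

# Illustrative shapes only: FileGroups/Files/FormatType and AnonymousUsers are
# assumptions; consult the QuickSight API reference for the real structures.
snapshot_configuration = Dict(
    "FileGroups" => [Dict("Files" => [Dict("FormatType" => "PDF")])],
)
user_configuration = Dict("AnonymousUsers" => [])

QuickSight.start_dashboard_snapshot_job(
    "111122223333",        # hypothetical Amazon Web Services account ID
    "my-dashboard-id",     # hypothetical dashboard ID
    snapshot_configuration,
    "my-snapshot-job-id",  # reusable 24 hours after the job completes
    user_configuration,
)

# Once the job reaches a terminal state, fetch the generated download URIs.
QuickSight.describe_dashboard_snapshot_job_result(
    "111122223333", "my-dashboard-id", "my-snapshot-job-id"
)
```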
+ +""" +function start_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotConfiguration, + SnapshotJobId, + UserConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs", + Dict{String,Any}( + "SnapshotConfiguration" => SnapshotConfiguration, + "SnapshotJobId" => SnapshotJobId, + "UserConfiguration" => UserConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_dashboard_snapshot_job( + AwsAccountId, + DashboardId, + SnapshotConfiguration, + SnapshotJobId, + UserConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/dashboards/$(DashboardId)/snapshot-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "SnapshotConfiguration" => SnapshotConfiguration, + "SnapshotJobId" => SnapshotJobId, + "UserConfiguration" => UserConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) diff --git a/src/services/ram.jl b/src/services/ram.jl index 7a2e18945e..e01a4d90a9 100644 --- a/src/services/ram.jl +++ b/src/services/ram.jl @@ -98,6 +98,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Access Manager User Guide. - `"resourceArns"`: Specifies a list of Amazon Resource Names (ARNs) of the resources that you want to share. This can be null if you want to add only principals. +- `"sources"`: Specifies from which source accounts the service principal has access to the + resources in this resource share. """ function associate_resource_share( resourceShareArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -410,6 +412,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Access Manager User Guide. - `"resourceArns"`: Specifies a list of one or more ARNs of the resources to associate with the resource share. +- `"sources"`: Specifies from which source accounts the service principal has access to the + resources in this resource share. - `"tags"`: Specifies one or more tags to attach to the resource share itself. It doesn't attach the tags to the resources associated with the resource share. """ @@ -639,6 +643,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"resourceArns"`: Specifies a list of Amazon Resource Names (ARNs) for one or more resources that you want to remove from the resource share. After the operation runs, these resources are no longer shared with principals associated with the resource share. +- `"sources"`: Specifies from which source accounts the service principal no longer has + access to the resources in this resource share. """ function disassociate_resource_share( resourceShareArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -894,8 +900,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys NextToken response to request the next page of results. - `"principal"`: Specifies the ID of the principal whose resource shares you want to retrieve. This can be an Amazon Web Services account ID, an organization ID, an - organizational unit ID, or the Amazon Resource Name (ARN) of an individual IAM user or - role. You cannot specify this parameter if the association type is RESOURCE. 
+ organizational unit ID, or the Amazon Resource Name (ARN) of an individual IAM role or + user. You cannot specify this parameter if the association type is RESOURCE. - `"resourceArn"`: Specifies the Amazon Resource Name (ARN) of a resource whose resource shares you want to retrieve. You cannot specify this parameter if the association type is PRINCIPAL. diff --git a/src/services/rds.jl b/src/services/rds.jl index 475dcbfaf1..50cb20fe87 100644 --- a/src/services/rds.jl +++ b/src/services/rds.jl @@ -1073,103 +1073,104 @@ RDS for MySQL or PostgreSQL DB instance as the source. For more information abou DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. # Arguments -- `dbcluster_identifier`: The DB cluster identifier. This parameter is stored as a - lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. - First character must be a letter. Can't end with a hyphen or contain two consecutive - hyphens. Example: my-cluster1 Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `engine`: The name of the database engine to be used for this DB cluster. Valid Values: - aurora-mysql aurora-postgresql mysql postgres Valid for: Aurora DB clusters - and Multi-AZ DB clusters +- `dbcluster_identifier`: The identifier for this DB cluster. This parameter is stored as a + lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. First character + must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: + my-cluster1 +- `engine`: The database engine to use for this DB cluster. Valid for Cluster Type: Aurora + DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql | aurora-postgresql | mysql + | postgres # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The amount of storage in gibibytes (GiB) to allocate to each DB - instance in the Multi-AZ DB cluster. This setting is required to create a Multi-AZ DB - cluster. Valid for: Multi-AZ DB clusters only -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the DB cluster during the maintenance window. By default, minor - engine upgrades are applied automatically. Valid for: Multi-AZ DB clusters only + instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only This + setting is required to create a Multi-AZ DB cluster. +- `"AutoMinorVersionUpgrade"`: Specifies whether minor engine upgrades are applied + automatically to the DB cluster during the maintenance window. By default, minor engine + upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only - `"AvailabilityZones"`: A list of Availability Zones (AZs) where DB instances in the DB cluster can be created. For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones in the Amazon Aurora User Guide. - Valid for: Aurora DB clusters only + Valid for Cluster Type: Aurora DB clusters only - `"BacktrackWindow"`: The target backtrack window, in seconds. To disable backtracking, - set this value to 0. Default: 0 Constraints: If specified, this value must be set to a - number from 0 to 259,200 (72 hours). Valid for: Aurora MySQL DB clusters only + set this value to 0. 
Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 + Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 + hours). - `"BackupRetentionPeriod"`: The number of days for which automated backups are retained. - Default: 1 Constraints: Must be a value from 1 to 35 Valid for: Aurora DB clusters and - Multi-AZ DB clusters -- `"CharacterSetName"`: A value that indicates that the DB cluster should be associated - with the specified CharacterSet. Valid for: Aurora DB clusters only -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB - cluster to snapshots of the DB cluster. The default is not to copy them. Valid for: Aurora - DB clusters and Multi-AZ DB clusters + Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Default: 1 + Constraints: Must be a value from 1 to 35. +- `"CharacterSetName"`: The name of the character set (CharacterSet) to associate the DB + cluster with. Valid for Cluster Type: Aurora DB clusters only +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the DB cluster to + snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"DBClusterInstanceClass"`: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB instance class in the Amazon RDS - User Guide. This setting is required to create a Multi-AZ DB cluster. Valid for: Multi-AZ - DB clusters only + User Guide. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster + Type: Multi-AZ DB clusters only - `"DBClusterParameterGroupName"`: The name of the DB cluster parameter group to associate - with this DB cluster. If you do not specify a value, then the default DB cluster parameter - group for the specified DB engine and version is used. Constraints: If supplied, must - match the name of an existing DB cluster parameter group. Valid for: Aurora DB clusters - and Multi-AZ DB clusters + with this DB cluster. If you don't specify a value, then the default DB cluster parameter + group for the specified DB engine and version is used. Valid for Cluster Type: Aurora DB + clusters and Multi-AZ DB clusters Constraints: If supplied, must match the name of an + existing DB cluster parameter group. - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB cluster. This setting - is required to create a Multi-AZ DB cluster. Constraints: Must match the name of an - existing DBSubnetGroup. Must not be default. Example: mydbsubnetgroup Valid for: Aurora DB - clusters and Multi-AZ DB clusters + is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters Constraints: Must match the name of an existing DB subnet group. + Must not be default. Example: mydbsubnetgroup - `"DBSystemId"`: Reserved for future use. - `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. If you - do not provide a name, Amazon RDS doesn't create a database in the DB cluster you are - creating. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. 
By - default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB + don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are + creating. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters +- `"DeletionProtection"`: Specifies whether the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. By default, deletion + protection isn't enabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"Domain"`: The Active Directory directory ID to create the DB cluster in. For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster. For more information, see Kerberos authentication in the Amazon - Aurora User Guide. Valid for: Aurora DB clusters only -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. Valid for: Aurora DB clusters only + Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. Valid for Cluster Type: Aurora DB clusters only - `"EnableCloudwatchLogsExports"`: The list of log types that need to be enabled for - exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. - RDS for MySQL Possible values are error, general, and slowquery. RDS for PostgreSQL - Possible values are postgresql and upgrade. Aurora MySQL Possible values are audit, - error, general, and slowquery. Aurora PostgreSQL Possible value is postgresql. For more - information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to - Amazon CloudWatch Logs in the Amazon RDS User Guide. For more information about exporting - CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs - in the Amazon Aurora User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"EnableGlobalWriteForwarding"`: A value that indicates whether to enable this DB cluster - to forward write operations to the primary cluster of an Aurora global database - (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that - are secondary clusters in an Aurora global database. You can set this value only on Aurora - DB clusters that are members of an Aurora global database. With this parameter enabled, a - secondary cluster can forward writes to the current primary cluster and the resulting - changes are replicated back to this cluster. For the primary DB cluster of an Aurora global - database, this value is used immediately if the primary is demoted by the - FailoverGlobalCluster API operation, but it does nothing until then. Valid for: Aurora DB - clusters only -- `"EnableHttpEndpoint"`: A value that indicates whether to enable the HTTP endpoint for an - Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. When enabled, - the HTTP endpoint provides a connectionless web service API for running SQL queries on the - Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS - console with the query editor. For more information, see Using the Data API for Aurora - Serverless v1 in the Amazon Aurora User Guide. 
Valid for: Aurora DB clusters only -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication in - the Amazon Aurora User Guide. Valid for: Aurora DB clusters only -- `"EnablePerformanceInsights"`: A value that indicates whether to turn on Performance - Insights for the DB cluster. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. Valid for: Multi-AZ DB clusters only + exporting to CloudWatch Logs. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters The following values are valid for each DB engine: Aurora MySQL - audit | error + | general | slowquery Aurora PostgreSQL - postgresql RDS for MySQL - error | general + | slowquery RDS for PostgreSQL - postgresql | upgrade For more information about + exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch + Logs in the Amazon RDS User Guide. For more information about exporting CloudWatch Logs for + Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora + User Guide. +- `"EnableGlobalWriteForwarding"`: Specifies whether to enable this DB cluster to forward + write operations to the primary cluster of a global cluster (Aurora global database). By + default, write operations are not allowed on Aurora DB clusters that are secondary clusters + in an Aurora global database. You can set this value only on Aurora DB clusters that are + members of an Aurora global database. With this parameter enabled, a secondary cluster can + forward writes to the current primary cluster, and the resulting changes are replicated + back to this cluster. For the primary DB cluster of an Aurora global database, this value + is used immediately if the primary is demoted by a global cluster API operation, but it + does nothing until then. Valid for Cluster Type: Aurora DB clusters only +- `"EnableHttpEndpoint"`: Specifies whether to enable the HTTP endpoint for an Aurora + Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. When enabled, the HTTP + endpoint provides a connectionless web service API for running SQL queries on the Aurora + Serverless v1 DB cluster. You can also query your database from inside the RDS console with + the query editor. For more information, see Using the Data API for Aurora Serverless v1 in + the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon + Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"EnablePerformanceInsights"`: Specifies whether to turn on Performance Insights for the + DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. Valid for Cluster Type: Multi-AZ DB clusters only - `"EngineMode"`: The DB engine mode of the DB cluster, either provisioned or serverless. The serverless engine mode only applies for Aurora Serverless v1 DB clusters. 
For information about limitations and requirements for Serverless DB clusters, see the following sections in the Amazon Aurora User Guide: Limitations of Aurora Serverless v1 - Requirements for Aurora Serverless v2 Valid for: Aurora DB clusters only + Requirements for Aurora Serverless v2 Valid for Cluster Type: Aurora DB clusters only - `"EngineVersion"`: The version number of the database engine to use. To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (MySQL 8.0-compatible), use the following command: aws rds describe-db-engine-versions --engine @@ -1181,45 +1182,47 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for MySQL, use the following command: aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\" To list all of the available engine versions for RDS for PostgreSQL, use the following command: aws rds describe-db-engine-versions - --engine postgres --query \"DBEngineVersions[].EngineVersion\" Aurora MySQL For - information, see Database engine updates for Amazon Aurora MySQL in the Amazon Aurora User - Guide. Aurora PostgreSQL For information, see Amazon Aurora PostgreSQL releases and - engine versions in the Amazon Aurora User Guide. MySQL For information, see Amazon RDS - for MySQL in the Amazon RDS User Guide. PostgreSQL For information, see Amazon RDS for - PostgreSQL in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB - clusters + --engine postgres --query \"DBEngineVersions[].EngineVersion\" For information about a + specific engine, see the following topics: Aurora MySQL - see Database engine updates for + Amazon Aurora MySQL in the Amazon Aurora User Guide. Aurora PostgreSQL - see Amazon + Aurora PostgreSQL releases and engine versions in the Amazon Aurora User Guide. RDS for + MySQL - see Amazon RDS for MySQL in the Amazon RDS User Guide. RDS for PostgreSQL - see + Amazon RDS for PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB + clusters and Multi-AZ DB clusters - `"GlobalClusterIdentifier"`: The global cluster ID of an Aurora cluster that becomes the - primary cluster in the new global database cluster. Valid for: Aurora DB clusters only + primary cluster in the new global database cluster. Valid for Cluster Type: Aurora DB + clusters only - `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see Provisioned IOPS storage in the Amazon RDS User Guide. This setting - is required to create a Multi-AZ DB cluster. Constraints: Must be a multiple between .5 and - 50 of the storage amount for the DB cluster. Valid for: Multi-AZ DB clusters only + is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters + only Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB + cluster. - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. When a KMS key isn't specified in KmsKeyId: If - ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS will use the - KMS key used to encrypt the source. Otherwise, Amazon RDS will use your default KMS key. 
- If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't - specified, then Amazon RDS will use your default KMS key. There is a default KMS key for - your Amazon Web Services account. Your Amazon Web Services account has a different default - KMS key for each Amazon Web Services Region. If you create a read replica of an encrypted - DB cluster in another Amazon Web Services Region, you must set KmsKeyId to a KMS key - identifier that is valid in the destination Amazon Web Services Region. This KMS key is - used to encrypt the read replica in that Amazon Web Services Region. Valid for: Aurora DB - clusters and Multi-AZ DB clusters -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. For more information, see Password - management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and - Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User - Guide. Constraints: Can't manage the master user password with Amazon Web Services - Secrets Manager if MasterUserPassword is specified. Valid for: Aurora DB clusters and - Multi-AZ DB clusters -- `"MasterUserPassword"`: The password for the master database user. This password can - contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must - contain from 8 to 41 characters. Can't be specified if ManageMasterUserPassword is turned - on. Valid for: Aurora DB clusters and Multi-AZ DB clusters + ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS uses the KMS + key used to encrypt the source. Otherwise, Amazon RDS uses your default KMS key. If the + StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then + Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web + Services account. Your Amazon Web Services account has a different default KMS key for each + Amazon Web Services Region. If you create a read replica of an encrypted DB cluster in + another Amazon Web Services Region, make sure to set KmsKeyId to a KMS key identifier that + is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the + read replica in that Amazon Web Services Region. Valid for Cluster Type: Aurora DB clusters + and Multi-AZ DB clusters +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. For more information, see Password management with + Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management + with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide. Valid for + Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Can't manage the + master user password with Amazon Web Services Secrets Manager if MasterUserPassword is + specified. +- `"MasterUserPassword"`: The password for the master database user. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 8 to 41 + characters. Can contain any printable ASCII character except \"/\", \"\"\", or \"@\". + Can't be specified if ManageMasterUserPassword is turned on. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. 
This setting is valid only if the master user password is managed by RDS in Amazon Web @@ -1231,47 +1234,45 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"MasterUsername"`: The name of the master user for the DB cluster. Constraints: Must - be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved - word for the chosen database engine. Valid for: Aurora DB clusters and Multi-AZ DB - clusters + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters +- `"MasterUsername"`: The name of the master user for the DB cluster. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must be 1 to 16 letters or + numbers. First character must be a letter. Can't be a reserved word for the chosen + database engine. - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring - metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, also set - MonitoringInterval to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60 Valid - for: Multi-AZ DB clusters only + metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a + value other than 0. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | + 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid - for: Multi-AZ DB clusters only -- `"NetworkType"`: The network type of the DB cluster. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB cluster. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters only -- `"OptionGroupName"`: A value that indicates that the DB cluster should be associated with - the specified option group. DB clusters are associated with a default option group that - can't be modified. + for Cluster Type: Multi-AZ DB clusters only +- `"NetworkType"`: The network type of the DB cluster. The network type is determined by + the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB + clusters only Valid Values: IPV4 | DUAL +- `"OptionGroupName"`: The option group to associate the DB cluster with. DB clusters are + associated with a default option group that can't be modified. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. 
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a
- different default KMS key for each Amazon Web Services Region. Valid for: Multi-AZ DB
- clusters only
-- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights
- data. The default is 7 days. The following values are valid: 7 month * 31, where month
- is a number of months from 1-23 731 For example, the following values are valid: 93
- (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a
- retention period such as 94, which isn't a valid value, RDS issues an error. Valid for:
+ different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only
+- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights
+ data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31,
+ where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months *
+ 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that
+ isn't valid, such as 94, Amazon RDS issues an error.
- `"Port"`: The port number on which the instances in the DB cluster accept connections.
- RDS for MySQL and Aurora MySQL Default: 3306 Valid values: 1150-65535 RDS for
- PostgreSQL and Aurora PostgreSQL Default: 5432 Valid values: 1150-65535 Valid for:
- Aurora DB clusters and Multi-AZ DB clusters
+ Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values:
+ 1150-65535 Default: RDS for MySQL and Aurora MySQL - 3306 RDS for PostgreSQL and
+ Aurora PostgreSQL - 5432
- `"PreSignedUrl"`: When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, a URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region
@@ -1295,62 +1296,63 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that
- can run in the source Amazon Web Services Region. Valid for: Aurora DB clusters onlyIf you
- supply a value for this operation's SourceRegion parameter, a pre-signed URL will be
- calculated on your behalf.
+ can run in the source Amazon Web Services Region. Valid for Cluster Type: Aurora DB
+ clusters only. If you supply a value for this operation's SourceRegion parameter, a
+ pre-signed URL will be calculated on your behalf.
- `"PreferredBackupWindow"`: The daily time range during which automated backups are
- created if automated backups are enabled using the BackupRetentionPeriod parameter. The
- default is a 30-minute window selected at random from an 8-hour block of time for each
- Amazon Web Services Region. To view the time blocks available, see Backup window in the
- Amazon Aurora User Guide. Constraints: Must be in the format hh24:mi-hh24:mi. Must be
- in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance
- window. Must be at least 30 minutes.
Valid for: Aurora DB clusters and Multi-AZ DB - clusters + created if automated backups are enabled using the BackupRetentionPeriod parameter. Valid + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a 30-minute + window selected at random from an 8-hour block of time for each Amazon Web Services Region. + To view the time blocks available, see Backup window in the Amazon Aurora User Guide. + Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal Coordinated + Time (UTC). Must not conflict with the preferred maintenance window. Must be at least + 30 minutes. - `"PreferredMaintenanceWindow"`: The weekly time range during which system maintenance can - occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is - a 30-minute window selected at random from an 8-hour block of time for each Amazon Web + occur. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters The default is a + 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. - Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. Constraints: Minimum 30-minute window. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"PubliclyAccessible"`: A value that indicates whether the DB cluster is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access isn't permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB cluster isn't publicly accessible, it is an internal DB cluster with - a DNS name that resolves to a private IP address. Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, - and PubliclyAccessible isn't specified, the following applies: If the default VPC in the - target Region doesn’t have an internet gateway attached to it, the DB cluster is private. - If the default VPC in the target Region has an internet gateway attached to it, the DB - cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't - specified, the following applies: If the subnets are part of a VPC that doesn’t have an - internet gateway attached to it, the DB cluster is private. If the subnets are part of a - VPC that has an internet gateway attached to it, the DB cluster is public. Valid for: - Multi-AZ DB clusters only + Constraints: Must be in the format ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | + Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be + at least 30 minutes. +- `"PubliclyAccessible"`: Specifies whether the DB cluster is publicly accessible. When the + DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the + private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to + the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is + ultimately controlled by the security group it uses. 
That public access isn't permitted if + the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't + publicly accessible, it is an internal DB cluster with a DNS name that resolves to a + private IP address. Valid for Cluster Type: Multi-AZ DB clusters only Default: The default + behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName + isn't specified, and PubliclyAccessible isn't specified, the following applies: If the + default VPC in the target Region doesn’t have an internet gateway attached to it, the DB + cluster is private. If the default VPC in the target Region has an internet gateway + attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and + PubliclyAccessible isn't specified, the following applies: If the subnets are part of a + VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If + the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster + is public. - `"ReplicationSourceIdentifier"`: The Amazon Resource Name (ARN) of the source DB instance - or DB cluster if this DB cluster is created as a read replica. Valid for: Aurora DB - clusters and Multi-AZ DB clusters + or DB cluster if this DB cluster is created as a read replica. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"ScalingConfiguration"`: For DB clusters in serverless DB engine mode, the scaling - properties of the DB cluster. Valid for: Aurora DB clusters only + properties of the DB cluster. Valid for Cluster Type: Aurora DB clusters only - `"ServerlessV2ScalingConfiguration"`: - `"SourceRegion"`: The ID of the region that contains the source for the read replica. -- `"StorageEncrypted"`: A value that indicates whether the DB cluster is encrypted. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"StorageType"`: Specifies the storage type to be associated with the DB cluster. This - setting is required to create a Multi-AZ DB cluster. When specified for a Multi-AZ DB - cluster, a value for the Iops parameter is required. Valid values: aurora, aurora-iopt1 - (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: aurora (Aurora DB clusters); io1 - (Multi-AZ DB clusters) Valid for: Aurora DB clusters and Multi-AZ DB clusters For more - information on storage types for Aurora DB clusters, see Storage configurations for Amazon - Aurora DB clusters. For more information on storage types for Multi-AZ DB clusters, see - Settings for creating Multi-AZ DB clusters. -- `"Tags"`: Tags to assign to the DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB - clusters +- `"StorageEncrypted"`: Specifies whether the DB cluster is encrypted. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB + clusters. For information on storage types for Multi-AZ DB clusters, see Settings for + creating Multi-AZ DB clusters. This setting is required to create a Multi-AZ DB cluster. + When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid + for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: Aurora DB + clusters - aurora | aurora-iopt1 Multi-AZ DB clusters - io1 Default: Aurora DB + clusters - aurora Multi-AZ DB clusters - io1 +- `"Tags"`: Tags to assign to the DB cluster. 
Valid for Cluster Type: Aurora DB clusters + and Multi-AZ DB clusters - `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this DB - cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters + cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters """ function create_dbcluster( DBClusterIdentifier, Engine; aws_config::AbstractAWSConfig=global_aws_config() @@ -1617,97 +1619,97 @@ Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide. or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance classes in the Amazon Aurora User Guide. -- `dbinstance_identifier`: The DB instance identifier. This parameter is stored as a - lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. - First character must be a letter. Can't end with a hyphen or contain two consecutive +- `dbinstance_identifier`: The identifier for this DB instance. This parameter is stored as + a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. + First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: mydbinstance -- `engine`: The name of the database engine to be used for this instance. Not every - database engine is available for every Amazon Web Services Region. Valid Values: - aurora-mysql (for Aurora MySQL DB instances) aurora-postgresql (for Aurora PostgreSQL DB - instances) custom-oracle-ee (for RDS Custom for Oracle DB instances) - custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances) custom-sqlserver-ee (for - RDS Custom for SQL Server DB instances) custom-sqlserver-se (for RDS Custom for SQL - Server DB instances) custom-sqlserver-web (for RDS Custom for SQL Server DB instances) - mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb - postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web +- `engine`: The database engine to use for this DB instance. Not every database engine is + available in every Amazon Web Services Region. Valid Values: aurora-mysql (for Aurora + MySQL DB instances) aurora-postgresql (for Aurora PostgreSQL DB instances) + custom-oracle-ee (for RDS Custom for Oracle DB instances) custom-oracle-ee-cdb (for RDS + Custom for Oracle DB instances) custom-sqlserver-ee (for RDS Custom for SQL Server DB + instances) custom-sqlserver-se (for RDS Custom for SQL Server DB instances) + custom-sqlserver-web (for RDS Custom for SQL Server DB instances) mariadb mysql + oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee + sqlserver-se sqlserver-ex sqlserver-web # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The amount of storage in gibibytes (GiB) to allocate for the DB - instance. Type: Integer Amazon Aurora Not applicable. Aurora cluster volumes + instance. This setting doesn't apply to Amazon Aurora DB instances. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume. Amazon RDS Custom Constraints to the amount of storage for each storage type are the following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. 
Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. - MySQL Constraints to the amount of storage for each storage type are the following: - General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. Magnetic storage - (standard): Must be an integer from 5 to 3072. MariaDB Constraints to the amount of - storage for each storage type are the following: General Purpose (SSD) storage (gp2, + RDS for MariaDB Constraints to the amount of storage for each storage type are the + following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. + Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. Magnetic storage + (standard): Must be an integer from 5 to 3072. RDS for MySQL Constraints to the amount + of storage for each storage type are the following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. Magnetic storage (standard): Must be an integer from 5 to - 3072. PostgreSQL Constraints to the amount of storage for each storage type are the + 3072. RDS for Oracle Constraints to the amount of storage for each storage type are the following: General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. Magnetic storage - (standard): Must be an integer from 5 to 3072. Oracle Constraints to the amount of - storage for each storage type are the following: General Purpose (SSD) storage (gp2, - gp3): Must be an integer from 20 to 65536. Provisioned IOPS storage (io1): Must be an - integer from 100 to 65536. Magnetic storage (standard): Must be an integer from 10 to - 3072. SQL Server Constraints to the amount of storage for each storage type are the - following: General Purpose (SSD) storage (gp2, gp3): Enterprise and Standard editions: - Must be an integer from 20 to 16384. Web and Express editions: Must be an integer from 20 - to 16384. Provisioned IOPS storage (io1): Enterprise and Standard editions: Must be - an integer from 100 to 16384. Web and Express editions: Must be an integer from 100 to - 16384. Magnetic storage (standard): Enterprise and Standard editions: Must be an - integer from 20 to 1024. Web and Express editions: Must be an integer from 20 to 1024. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the DB instance during the maintenance window. By default, minor - engine upgrades are applied automatically. If you create an RDS Custom DB instance, you - must set AutoMinorVersionUpgrade to false. + (standard): Must be an integer from 10 to 3072. RDS for PostgreSQL Constraints to the + amount of storage for each storage type are the following: General Purpose (SSD) storage + (gp2, gp3): Must be an integer from 20 to 65536. Provisioned IOPS storage (io1): Must be + an integer from 100 to 65536. Magnetic storage (standard): Must be an integer from 5 to + 3072. RDS for SQL Server Constraints to the amount of storage for each storage type are + the following: General Purpose (SSD) storage (gp2, gp3): Enterprise and Standard + editions: Must be an integer from 20 to 16384. Web and Express editions: Must be an + integer from 20 to 16384. 
Provisioned IOPS storage (io1): Enterprise and Standard
+ editions: Must be an integer from 100 to 16384. Web and Express editions: Must be an
+ integer from 100 to 16384. Magnetic storage (standard): Enterprise and Standard
+ editions: Must be an integer from 20 to 1024. Web and Express editions: Must be an
+ integer from 20 to 1024.
+- `"AutoMinorVersionUpgrade"`: Specifies whether minor engine upgrades are applied
+ automatically to the DB instance during the maintenance window. By default, minor engine
+ upgrades are applied automatically. If you create an RDS Custom DB instance, you must set
+ AutoMinorVersionUpgrade to false.
- `"AvailabilityZone"`: The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and
- Availability Zones. Amazon Aurora Each Aurora DB cluster hosts copies of its storage in
- three separate Availability Zones. Specify one of these Availability Zones. Aurora
+ Availability Zones. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage
+ in three separate Availability Zones. Specify one of these Availability Zones. Aurora
automatically chooses an appropriate Availability Zone if you don't specify one. Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.
- Example: us-east-1d Constraint: The AvailabilityZone parameter can't be specified if the
- DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same
- Amazon Web Services Region as the current endpoint.
+ Constraints: The AvailabilityZone parameter can't be specified if the DB instance is a
+ Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web
+ Services Region as the current endpoint. Example: us-east-1d
- `"BackupRetentionPeriod"`: The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0
- disables automated backups. Amazon Aurora Not applicable. The retention period for
- automated backups is managed by the DB cluster. Default: 1 Constraints: Must be a value
- from 0 to 35 Can't be set to 0 if the DB instance is a source to read replicas Can't be
- set to 0 for an RDS Custom for Oracle DB instance
-- `"BackupTarget"`: Specifies where automated backups and manual snapshots are stored.
- Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services
- Region). The default is region. For more information, see Working with Amazon RDS on Amazon
- Web Services Outposts in the Amazon RDS User Guide.
-- `"CACertificateIdentifier"`: Specifies the CA certificate identifier to use for the DB
- instance's server certificate. This setting doesn't apply to RDS Custom. For more
+ disables automated backups. This setting doesn't apply to Amazon Aurora DB instances. The
+ retention period for automated backups is managed by the DB cluster. Default: 1
+ Constraints: Must be a value from 0 to 35. Can't be set to 0 if the DB instance is a
+ source to read replicas. Can't be set to 0 for an RDS Custom for Oracle DB instance.
+- `"BackupTarget"`: The location for storing automated backups and manual snapshots. Valid
+ Values: outposts (Amazon Web Services Outposts) region (Amazon Web Services Region)
+ Default: region For more information, see Working with Amazon RDS on Amazon Web Services
+ Outposts in the Amazon RDS User Guide.
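As a usage sketch of the optional parameters documented above: they are passed through the wrapper's `params::Dict{String,<:Any}` argument. The snippet below is illustrative only and not part of the auto-generated file; it assumes AWS.jl's high-level `@service` interface, and the instance class, identifier, engine, and sizes are placeholders.

using AWS
@service RDS

# Create a small RDS for MySQL instance, supplying a few of the optional keys
# documented above (AllocatedStorage, BackupRetentionPeriod,
# AutoMinorVersionUpgrade) through the params dictionary.
RDS.create_dbinstance(
    "db.t3.micro",      # DBInstanceClass (placeholder)
    "mydbinstance",     # DBInstanceIdentifier (placeholder)
    "mysql",            # Engine
    Dict{String,Any}(
        "AllocatedStorage" => 20,
        "BackupRetentionPeriod" => 7,
        "AutoMinorVersionUpgrade" => true,
    );
    aws_config=global_aws_config(),
)

The positional arguments follow the order of the required arguments listed in this docstring (DB instance class, DB instance identifier, engine).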
+- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. -- `"CharacterSetName"`: For supported engines, this value indicates that the DB instance - should be associated with the specified CharacterSet. This setting doesn't apply to RDS - Custom. However, if you need to change the character set, you can change it on the database - itself. Amazon Aurora Not applicable. The character set is managed by the DB cluster. For - more information, see CreateDBCluster. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy tags from the DB instance - to snapshots of the DB instance. By default, tags are not copied. Amazon Aurora Not - applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for - an Aurora DB instance has no effect on the DB cluster setting. +- `"CharacterSetName"`: For supported engines, the character set (CharacterSet) to + associate the DB instance with. This setting doesn't apply to the following DB instances: + Amazon Aurora - The character set is managed by the DB cluster. For more information, see + CreateDBCluster. RDS Custom - However, if you need to change the character set, you can + change it on the database itself. +- `"CopyTagsToSnapshot"`: Specifies whether to copy tags from the DB instance to snapshots + of the DB instance. By default, tags are not copied. This setting doesn't apply to Amazon + Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this + value for an Aurora DB instance has no effect on the DB cluster setting. - `"CustomIamInstanceProfile"`: The instance profile associated with the underlying Amazon - EC2 instance of an RDS Custom DB instance. The instance profile must meet the following - requirements: The profile must exist in your account. The profile must have an IAM role + EC2 instance of an RDS Custom DB instance. This setting is required for RDS Custom. + Constraints: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. - This setting is required for RDS Custom. -- `"DBClusterIdentifier"`: The identifier of the DB cluster that the instance will belong - to. This setting doesn't apply to RDS Custom. +- `"DBClusterIdentifier"`: The identifier of the DB cluster that this DB instance will + belong to. This setting doesn't apply to RDS Custom DB instances. - `"DBName"`: The meaning of this parameter differs according to the database engine you use. MySQL The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must @@ -1722,125 +1724,146 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys postgres is created in the DB instance. Constraints: Must contain 1 to 63 letters, numbers, or underscores. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). 
Can't be a word reserved by the specified database engine - Oracle The Oracle System ID (SID) of the created DB instance. If you specify null, the - default value ORCL is used. You can't specify the string NULL, or any other reserved word, - for DBName. Default: ORCL Constraints: Can't be longer than 8 characters Amazon RDS - Custom for Oracle The Oracle System ID (SID) of the created RDS Custom DB instance. If you - don't specify a value, the default value is ORCL. Default: ORCL Constraints: It must - contain 1 to 8 alphanumeric characters. It must contain a letter. It can't be a word - reserved by the database engine. Amazon RDS Custom for SQL Server Not applicable. Must - be null. SQL Server Not applicable. Must be null. Amazon Aurora MySQL The name of the - database to create when the primary DB instance of the Aurora MySQL DB cluster is created. - If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in - the DB cluster. Constraints: It must contain 1 to 64 alphanumeric characters. It can't - be a word reserved by the database engine. Amazon Aurora PostgreSQL The name of the - database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is - created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database - named postgres is created in the DB cluster. Constraints: It must contain 1 to 63 - alphanumeric characters. It must begin with a letter. Subsequent characters can be - letters, underscores, or digits (0 to 9). It can't be a word reserved by the database - engine. + Oracle The Oracle System ID (SID) of the created DB instance. If you don't specify a + value, the default value is ORCL. You can't specify the string null, or any other reserved + word, for DBName. Default: ORCL Constraints: Can't be longer than 8 characters Amazon + RDS Custom for Oracle The Oracle System ID (SID) of the created RDS Custom DB instance. If + you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs. + Default: ORCL Constraints: It must contain 1 to 8 alphanumeric characters. It must + contain a letter. It can't be a word reserved by the database engine. Amazon RDS + Custom for SQL Server Not applicable. Must be null. SQL Server Not applicable. Must be + null. Amazon Aurora MySQL The name of the database to create when the primary DB instance + of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora + MySQL DB cluster, no database is created in the DB cluster. Constraints: It must contain + 1 to 64 alphanumeric characters. It can't be a word reserved by the database engine. + Amazon Aurora PostgreSQL The name of the database to create when the primary DB instance + of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an + Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster. + Constraints: It must contain 1 to 63 alphanumeric characters. It must begin with a + letter. Subsequent characters can be letters, underscores, or digits (0 to 9). It can't + be a word reserved by the database engine. - `"DBParameterGroupName"`: The name of the DB parameter group to associate with this DB - instance. If you do not specify a value, then the default DB parameter group for the - specified DB engine and version is used. This setting doesn't apply to RDS Custom. - Constraints: It must be 1 to 255 letters, numbers, or hyphens. The first character must - be a letter. 
It can't end with a hyphen or contain two consecutive hyphens. + instance. If you don't specify a value, then Amazon RDS uses the default DB parameter group + for the specified DB engine and version. This setting doesn't apply to RDS Custom DB + instances. Constraints: Must be 1 to 255 letters, numbers, or hyphens. The first + character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. - `"DBSecurityGroups"`: A list of DB security groups to associate with this DB instance. This setting applies to the legacy EC2-Classic platform, which is no longer used to create new DB instances. Use the VpcSecurityGroupIds setting instead. - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB instance. Constraints: - Must match the name of an existing DBSubnetGroup. Must not be default. Example: + Must match the name of an existing DB subnet group. Must not be default. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. Amazon Aurora Not applicable. You can enable or disable deletion protection for - the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can - be deleted even when deletion protection is enabled for the DB cluster. +- `"DBSystemId"`: The Oracle system identifier (SID), which is the name of the Oracle + database instance that manages your database files. In this context, the term \"Oracle + database instance\" refers exclusively to the system global area (SGA) and Oracle + background processes. If you don't specify a SID, the value defaults to RDSCDB. The Oracle + SID is also the name of your CDB. +- `"DeletionProtection"`: Specifies whether the DB instance has deletion protection + enabled. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. For more information, see Deleting a DB Instance. This + setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion + protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a + DB cluster can be deleted even when deletion protection is enabled for the DB cluster. - `"Domain"`: The Active Directory directory ID to create the DB instance in. Currently, - only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an + only Microsoft SQL Server, MySQL, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon - RDS User Guide. This setting doesn't apply to RDS Custom. Amazon Aurora Not applicable. - The domain is managed by the DB cluster. -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. This setting doesn't apply to RDS Custom. Amazon Aurora Not - applicable. The domain is managed by the DB cluster. + RDS User Guide. This setting doesn't apply to the following DB instances: Amazon Aurora + (The domain is managed by the DB cluster.) RDS Custom +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. 
Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. This setting doesn't apply to the following DB instances: Amazon + Aurora (The domain is managed by the DB cluster.) RDS Custom +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain - `"EnableCloudwatchLogsExports"`: The list of log types that need to be enabled for - exporting to CloudWatch Logs. The values in the list depend on the DB engine. For more - information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User - Guide. Amazon Aurora Not applicable. CloudWatch Logs exports are managed by the DB - cluster. RDS Custom Not applicable. MariaDB Possible values are audit, error, general, - and slowquery. Microsoft SQL Server Possible values are agent and error. MySQL Possible - values are audit, error, general, and slowquery. Oracle Possible values are alert, audit, - listener, trace, and oemagent. PostgreSQL Possible values are postgresql and upgrade. -- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP - address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external - connectivity to resources in your Outpost subnets through your on-premises network. For - some use cases, a CoIP can provide lower latency for connections to the DB instance from - outside of its virtual private cloud (VPC) on your local network. For more information - about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the - Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in - the Amazon Web Services Outposts User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication for - MySQL and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS - Custom. Amazon Aurora Not applicable. Mapping Amazon Web Services IAM accounts to - database accounts is managed by the DB cluster. -- `"EnablePerformanceInsights"`: A value that indicates whether to enable Performance - Insights for the DB instance. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"EngineVersion"`: The version number of the database engine to use. For a list of valid - engine versions, use the DescribeDBEngineVersions operation. The following are the database - engines and links to information about the major and minor versions that are available with - Amazon RDS. 
Not every database engine is available for every Amazon Web Services Region. - Amazon Aurora Not applicable. The version number of the database engine to be used by the - DB instance is managed by the DB cluster. Amazon RDS Custom for Oracle A custom engine - version (CEV) that you have previously created. This setting is required for RDS Custom for - Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is - 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the - Amazon RDS User Guide. Amazon RDS Custom for SQL Server See RDS Custom for SQL Server - general requirements in the Amazon RDS User Guide. MariaDB For information, see MariaDB - on Amazon RDS Versions in the Amazon RDS User Guide. Microsoft SQL Server For - information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide. - MySQL For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide. - Oracle For information, see Oracle Database Engine Release Notes in the Amazon RDS User - Guide. PostgreSQL For information, see Amazon RDS for PostgreSQL versions and extensions - in the Amazon RDS User Guide. -- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be - initially allocated for the DB instance. For information about valid IOPS values, see - Amazon RDS DB instance storage in the Amazon RDS User Guide. Constraints: For MariaDB, - MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the - storage amount for the DB instance. For SQL Server DB instances, must be a multiple between - 1 and 50 of the storage amount for the DB instance. Amazon Aurora Not applicable. Storage - is managed by the DB cluster. + exporting to CloudWatch Logs. For more information, see Publishing Database Logs to Amazon + CloudWatch Logs in the Amazon RDS User Guide. This setting doesn't apply to the following + DB instances: Amazon Aurora (CloudWatch Logs exports are managed by the DB cluster.) + RDS Custom The following values are valid for each DB engine: RDS for MariaDB - audit | + error | general | slowquery RDS for Microsoft SQL Server - agent | error RDS for + MySQL - audit | error | general | slowquery RDS for Oracle - alert | audit | listener | + trace | oemagent RDS for PostgreSQL - postgresql | upgrade +- `"EnableCustomerOwnedIp"`: Specifies whether to enable a customer-owned IP address (CoIP) + for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to + resources in your Outpost subnets through your on-premises network. For some use cases, a + CoIP can provide lower latency for connections to the DB instance from outside of its + virtual private cloud (VPC) on your local network. For more information about RDS on + Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS + User Guide. For more information about CoIPs, see Customer-owned IP addresses in the Amazon + Web Services Outposts User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information, see IAM Database Authentication for MySQL and + PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to the following DB + instances: Amazon Aurora (Mapping Amazon Web Services IAM accounts to database accounts + is managed by the DB cluster.) 
RDS Custom +- `"EnablePerformanceInsights"`: Specifies whether to enable Performance Insights for the + DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. This setting doesn't apply to RDS Custom DB instances. +- `"EngineVersion"`: The version number of the database engine to use. This setting doesn't + apply to Amazon Aurora DB instances. The version number of the database engine the DB + instance uses is managed by the DB cluster. For a list of valid engine versions, use the + DescribeDBEngineVersions operation. The following are the database engines and links to + information about the major and minor versions that are available with Amazon RDS. Not + every database engine is available for every Amazon Web Services Region. Amazon RDS Custom + for Oracle A custom engine version (CEV) that you have previously created. This setting is + required for RDS Custom for Oracle. The CEV name has the following format: + 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating + an RDS Custom for Oracle DB instance in the Amazon RDS User Guide. Amazon RDS Custom for + SQL Server See RDS Custom for SQL Server general requirements in the Amazon RDS User + Guide. RDS for MariaDB For information, see MariaDB on Amazon RDS versions in the Amazon + RDS User Guide. RDS for Microsoft SQL Server For information, see Microsoft SQL Server + versions on Amazon RDS in the Amazon RDS User Guide. RDS for MySQL For information, see + MySQL on Amazon RDS versions in the Amazon RDS User Guide. RDS for Oracle For + information, see Oracle Database Engine release notes in the Amazon RDS User Guide. RDS + for PostgreSQL For information, see Amazon RDS for PostgreSQL versions and extensions in + the Amazon RDS User Guide. +- `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to + initially allocate for the DB instance. For information about valid IOPS values, see Amazon + RDS DB instance storage in the Amazon RDS User Guide. This setting doesn't apply to Amazon + Aurora DB instances. Storage is managed by the DB cluster. Constraints: For RDS for + MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple between .5 and 50 of the + storage amount for the DB instance. For RDS for SQL Server - Must be a multiple between 1 + and 50 of the storage amount for the DB instance. - `"KmsKeyId"`: The Amazon Web Services KMS key identifier for an encrypted DB instance. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the - key ARN or alias ARN. Amazon Aurora Not applicable. The Amazon Web Services KMS key - identifier is managed by the DB cluster. For more information, see CreateDBCluster. If - StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, - then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web - Services account. Your Amazon Web Services account has a different default KMS key for each - Amazon Web Services Region. Amazon RDS Custom A KMS key is required for RDS Custom - instances. For most RDS engines, if you leave this parameter empty while enabling - StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the - default key when this parameter is empty. You must explicitly specify a key. -- `"LicenseModel"`: License model information for this DB instance. 
Valid values: - license-included | bring-your-own-license | general-public-license This setting doesn't - apply to RDS Custom. Amazon Aurora Not applicable. -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. For more information, see Password - management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. - Constraints: Can't manage the master user password with Amazon Web Services Secrets - Manager if MasterUserPassword is specified. -- `"MasterUserPassword"`: The password for the master user. The password can include any - printable ASCII character except \"/\", \"\"\", or \"@\". Amazon Aurora Not applicable. - The password for the master user is managed by the DB cluster. Constraints: Can't be - specified if ManageMasterUserPassword is turned on. MariaDB Constraints: Must contain - from 8 to 41 characters. Microsoft SQL Server Constraints: Must contain from 8 to 128 - characters. MySQL Constraints: Must contain from 8 to 41 characters. Oracle - Constraints: Must contain from 8 to 30 characters. PostgreSQL Constraints: Must contain + key ARN or alias ARN. This setting doesn't apply to Amazon Aurora DB instances. The Amazon + Web Services KMS key identifier is managed by the DB cluster. For more information, see + CreateDBCluster. If StorageEncrypted is enabled, and you do not specify a value for the + KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key + for your Amazon Web Services account. Your Amazon Web Services account has a different + default KMS key for each Amazon Web Services Region. For Amazon RDS Custom, a KMS key is + required for DB instances. For most RDS engines, if you leave this parameter empty while + enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't + use the default key when this parameter is empty. You must explicitly specify a key. +- `"LicenseModel"`: The license model information for this DB instance. This setting + doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for MariaDB + - general-public-license RDS for Microsoft SQL Server - license-included RDS for + MySQL - general-public-license RDS for Oracle - bring-your-own-license | + license-included RDS for PostgreSQL - postgresql-license +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. For more information, see Password management with + Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't + manage the master user password with Amazon Web Services Secrets Manager if + MasterUserPassword is specified. +- `"MasterUserPassword"`: The password for the master user. This setting doesn't apply to + Amazon Aurora DB instances. The password for the master user is managed by the DB cluster. + Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include + any printable ASCII character except \"/\", \"\"\", or \"@\". Length Constraints: RDS + for MariaDB - Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must + contain from 8 to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. + RDS for Oracle - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 characters. 
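The ManageMasterUserPassword / MasterUserPassword pair described above behaves the same way in both the CreateDBCluster and CreateDBInstance docstrings in this file: either pass an explicit password or let RDS generate one and store it in Secrets Manager, but not both. A minimal sketch using the cluster-level wrapper, assuming AWS.jl's high-level `@service` interface and placeholder names:

using AWS
@service RDS

# Create an Aurora MySQL cluster whose master user password is generated and
# stored in Secrets Manager, so no MasterUserPassword value is supplied.
# The cluster identifier and engine are placeholders.
RDS.create_dbcluster(
    "my-aurora-cluster",    # DBClusterIdentifier (placeholder)
    "aurora-mysql",         # Engine
    Dict{String,Any}(
        "MasterUsername" => "admin",
        "ManageMasterUserPassword" => true,
    ),
)

To supply the password directly instead, drop ManageMasterUserPassword and pass a "MasterUserPassword" entry that satisfies the length and character constraints listed above.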
- `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. @@ -1853,120 +1876,121 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. -- `"MasterUsername"`: The name for the master user. Amazon Aurora Not applicable. The - name for the master user is managed by the DB cluster. Amazon RDS Constraints: - Required. Must be 1 to 16 letters, numbers, or underscores. First character must be a - letter. Can't be a reserved word for the chosen database engine. +- `"MasterUsername"`: The name for the master user. This setting doesn't apply to Amazon + Aurora DB instances. The name for the master user is managed by the DB cluster. This + setting is required for RDS DB instances. Constraints: Must be 1 to 16 letters, numbers, + or underscores. First character must be a letter. Can't be a reserved word for the + chosen database engine. - `"MaxAllocatedStorage"`: The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to - RDS Custom. Amazon Aurora Not applicable. Storage is managed by the DB cluster. + the following DB instances: Amazon Aurora (Storage is managed by the DB cluster.) RDS + Custom - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring - metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, then you must set - MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom. Valid - Values: 0, 1, 5, 10, 15, 30, 60 + metrics, specify 0. If MonitoringRoleArn is specified, then you must set MonitoringInterval + to a value other than 0. This setting doesn't apply to RDS Custom DB instances. Valid + Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn - value. This setting doesn't apply to RDS Custom. -- `"MultiAZ"`: A value that indicates whether the DB instance is a Multi-AZ deployment. You - can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. This - setting doesn't apply to RDS Custom. Amazon Aurora Not applicable. DB instance - Availability Zones (AZs) are managed by the DB cluster. + value. This setting doesn't apply to RDS Custom DB instances. +- `"MultiAZ"`: Specifies whether the DB instance is a Multi-AZ deployment. You can't set + the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. 
This setting + doesn't apply to the following DB instances: Amazon Aurora (DB instance Availability + Zones (AZs) are managed by the DB cluster.) RDS Custom - `"NcharCharacterSetName"`: The name of the NCHAR character set for the Oracle DB - instance. This parameter doesn't apply to RDS Custom. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB instance. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon RDS User - Guide. -- `"OptionGroupName"`: A value that indicates that the DB instance should be associated - with the specified option group. Permanent options, such as the TDE option for Oracle - Advanced Security TDE, can't be removed from an option group. Also, that option group can't - be removed from a DB instance after it is associated with a DB instance. This setting - doesn't apply to RDS Custom. Amazon Aurora Not applicable. + instance. This setting doesn't apply to RDS Custom DB instances. +- `"NetworkType"`: The network type of the DB instance. The network type is determined by + the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon RDS User Guide. Valid Values: IPV4 | DUAL +- `"OptionGroupName"`: The option group to associate the DB instance with. Permanent + options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an + option group. Also, that option group can't be removed from a DB instance after it is + associated with a DB instance. This setting doesn't apply to Amazon Aurora or RDS Custom DB + instances. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the - key ARN, key ID, alias ARN, or alias name for the KMS key. If you do not specify a value - for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a - default KMS key for your Amazon Web Services account. Your Amazon Web Services account has - a different default KMS key for each Amazon Web Services Region. This setting doesn't apply - to RDS Custom. + key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for + PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default + KMS key for your Amazon Web Services account. Your Amazon Web Services account has a + different default KMS key for each Amazon Web Services Region. This setting doesn't apply + to RDS Custom DB instances. - `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. This setting - doesn't apply to RDS Custom. -- `"Port"`: The port number on which the database accepts connections. 
MySQL Default: - 3306 Valid values: 1150-65535 Type: Integer MariaDB Default: 3306 Valid values: - 1150-65535 Type: Integer PostgreSQL Default: 5432 Valid values: 1150-65535 Type: - Integer Oracle Default: 1521 Valid values: 1150-65535 SQL Server Default: 1433 Valid - values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156. Amazon - Aurora Default: 3306 Valid values: 1150-65535 Type: Integer + data. This setting doesn't apply to RDS Custom DB instances. Valid Values: 7 month * + 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 + months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention + period that isn't valid, such as 94, Amazon RDS returns an error. +- `"Port"`: The port number on which the database accepts connections. This setting doesn't + apply to Aurora DB instances. The port number is managed by the cluster. Valid Values: + 1150-65535 Default: RDS for MariaDB - 3306 RDS for Microsoft SQL Server - 1433 RDS + for MySQL - 3306 RDS for Oracle - 1521 RDS for PostgreSQL - 5432 Constraints: + For RDS for Microsoft SQL Server, the value can't be 1234, 1434, 3260, 3343, 3389, 47001, + or 49152-49156. - `"PreferredBackupWindow"`: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window in the Amazon RDS User - Guide. Amazon Aurora Not applicable. The daily time range for creating automated backups - is managed by the DB cluster. Constraints: Must be in the format hh24:mi-hh24:mi. Must - be in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance - window. Must be at least 30 minutes. + Guide. This setting doesn't apply to Amazon Aurora DB instances. The daily time range for + creating automated backups is managed by the DB cluster. Constraints: Must be in the + format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict + with the preferred maintenance window. Must be at least 30 minutes. - `"PreferredMaintenanceWindow"`: The time range each week during which system maintenance - can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS - Maintenance Window. Format: ddd:hh24:mi-ddd:hh24:mi The default is a 30-minute window - selected at random from an 8-hour block of time for each Amazon Web Services Region, - occurring on a random day of the week. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. - Constraints: Minimum 30-minute window. + can occur. For more information, see Amazon RDS Maintenance Window in the Amazon RDS User + Guide. The default is a 30-minute window selected at random from an 8-hour block of time + for each Amazon Web Services Region, occurring on a random day of the week. Constraints: + Must be in the format ddd:hh24:mi-ddd:hh24:mi. The day values must be mon | tue | wed | + thu | fri | sat | sun. Must be in Universal Coordinated Time (UTC). Must not conflict + with the preferred backup window. Must be at least 30 minutes. - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the - DB instance class of the DB instance. This setting doesn't apply to RDS Custom. Amazon - Aurora Not applicable. 
-- `"PromotionTier"`: A value that specifies the order in which an Aurora Replica is - promoted to the primary instance after a failure of the existing primary instance. For more - information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. - This setting doesn't apply to RDS Custom. Default: 1 Valid Values: 0 - 15 -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. When the DB instance is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB instance's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. - Access to the DB instance is ultimately controlled by the security group it uses. That - public access is not permitted if the security group assigned to the DB instance doesn't - permit it. When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. Default: The default behavior varies - depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, - and PubliclyAccessible isn't specified, the following applies: If the default VPC in the + DB instance class of the DB instance. This setting doesn't apply to Amazon Aurora or RDS + Custom DB instances. +- `"PromotionTier"`: The order of priority in which an Aurora Replica is promoted to the + primary instance after a failure of the existing primary instance. For more information, + see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting + doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB instance's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB instance's VPC. Access to the DB + instance is ultimately controlled by the security group it uses. That public access is not + permitted if the security group assigned to the DB instance doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. Default: The default behavior varies depending on whether + DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and + PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private. If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public. -- `"StorageEncrypted"`: A value that indicates whether the DB instance is encrypted. By - default, it isn't encrypted. For RDS Custom instances, either set this parameter to true or - leave it unset. If you set this parameter to false, RDS reports an error. Amazon Aurora - Not applicable. The encryption for DB instances is managed by the DB cluster. 
-- `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This
-  setting applies only to the gp3 storage type. This setting doesn't apply to RDS Custom or
-  Amazon Aurora.
-- `"StorageType"`: Specifies the storage type to be associated with the DB instance. Valid
-  values: gp2 | gp3 | io1 | standard If you specify io1 or gp3, you must also include a
-  value for the Iops parameter. Default: io1 if the Iops parameter is specified, otherwise
-  gp2 Amazon Aurora Not applicable. Storage is managed by the DB cluster.
+- `"StorageEncrypted"`: Specifies whether the DB instance is encrypted. By default, it isn't
+  encrypted. For RDS Custom DB instances, either enable this setting or leave it unset.
+  Otherwise, Amazon RDS reports an error. This setting doesn't apply to Amazon Aurora DB
+  instances. The encryption for DB instances is managed by the DB cluster.
+- `"StorageThroughput"`: The storage throughput value for the DB instance. This setting
+  applies only to the gp3 storage type. This setting doesn't apply to Amazon Aurora or RDS
+  Custom DB instances.
+- `"StorageType"`: The storage type to associate with the DB instance. If you specify io1
+  or gp3, you must also include a value for the Iops parameter. This setting doesn't apply to
+  Amazon Aurora DB instances. Storage is managed by the DB cluster. Valid Values: gp2 | gp3 |
+  io1 | standard Default: io1, if the Iops parameter is specified. Otherwise, gp2.
 - `"Tags"`: Tags to assign to the DB instance.
 - `"TdeCredentialArn"`: The ARN from the key store with which to associate the instance for
-  TDE encryption. This setting doesn't apply to RDS Custom. Amazon Aurora Not applicable.
+  TDE encryption. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
 - `"TdeCredentialPassword"`: The password for the given ARN from the key store in order to
-  access the device. This setting doesn't apply to RDS Custom.
+  access the device. This setting doesn't apply to RDS Custom DB instances.
 - `"Timezone"`: The time zone of the DB instance. The time zone parameter is currently
   supported only by Microsoft SQL Server.
 - `"VpcSecurityGroupIds"`: A list of Amazon EC2 VPC security groups to associate with this
-  DB instance. Amazon Aurora Not applicable. The associated list of EC2 VPC security groups
-  is managed by the DB cluster. Default: The default EC2 VPC security group for the DB subnet
-  group's VPC.
+  DB instance. This setting doesn't apply to Amazon Aurora DB instances. The associated list
+  of EC2 VPC security groups is managed by the DB cluster. Default: The default EC2 VPC
+  security group for the DB subnet group's VPC.
 """
 function create_dbinstance(
     DBInstanceClass,
@@ -2082,8 +2106,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an
   Active Directory Domain. For more information, see Kerberos Authentication in the Amazon
   RDS User Guide. This setting doesn't apply to RDS Custom.
+- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for
+  the user joining the domain. Example:
+  arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456
+- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active
+  Directory domain controllers. Constraints: Two IP addresses must be provided.
If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain - `"DomainIAMRoleName"`: The name of the IAM role to be used when making API calls to the Directory Service. This setting doesn't apply to RDS Custom. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain - `"EnableCloudwatchLogsExports"`: The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User @@ -2813,25 +2850,36 @@ Creates an Aurora global database spread across multiple Amazon Web Services Reg global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem. You can create a global database -that is initially empty, and then add a primary cluster and a secondary cluster to it. Or -you can specify an existing Aurora cluster during the create operation, and this cluster -becomes the primary cluster of the global database. This action applies only to Aurora DB -clusters. +that is initially empty, and then create the primary and secondary DB clusters in the +global database. Or you can specify an existing Aurora cluster during the create operation, +and this cluster becomes the primary cluster of the global database. This operation +applies only to Aurora DB clusters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. If you - do not provide a name, Amazon Aurora will not create a database in the global database - cluster you are creating. -- `"DeletionProtection"`: The deletion protection setting for the new global database. The - global database can't be deleted when deletion protection is enabled. -- `"Engine"`: The name of the database engine to be used for this DB cluster. -- `"EngineVersion"`: The engine version of the Aurora global database. -- `"GlobalClusterIdentifier"`: The cluster identifier of the new global database cluster. + don't specify a name, Amazon Aurora doesn't create a database in the global database + cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In + this case, Amazon Aurora uses the database name from the source DB cluster. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the new + global database cluster. The global database can't be deleted when deletion protection is + enabled. +- `"Engine"`: The database engine to use for this global database cluster. Valid Values: + aurora-mysql | aurora-postgresql Constraints: Can't be specified if + SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the engine of the + source DB cluster. +- `"EngineVersion"`: The engine version to use for this global database cluster. 
+ Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, + Amazon Aurora uses the engine version of the source DB cluster. +- `"GlobalClusterIdentifier"`: The cluster identifier for this global database cluster. This parameter is stored as a lowercase string. - `"SourceDBClusterIdentifier"`: The Amazon Resource Name (ARN) to use as the primary - cluster of the global database. This parameter is optional. -- `"StorageEncrypted"`: The storage encryption setting for the new global database cluster. + cluster of the global database. If you provide a value for this parameter, don't specify + values for the following settings because Amazon Aurora uses the values from the specified + source DB cluster: DatabaseName Engine EngineVersion StorageEncrypted +- `"StorageEncrypted"`: Specifies whether to enable storage encryption for the new global + database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is + specified. In this case, Amazon Aurora uses the setting from the source DB cluster. """ function create_global_cluster(; aws_config::AbstractAWSConfig=global_aws_config()) return rds( @@ -2923,19 +2971,18 @@ end delete_blue_green_deployment(blue_green_deployment_identifier, params::Dict{String,<:Any}) Deletes a blue/green deployment. For more information, see Using Amazon RDS Blue/Green -Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS +Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. # Arguments -- `blue_green_deployment_identifier`: The blue/green deployment identifier of the - deployment to be deleted. This parameter isn't case-sensitive. Constraints: Must match - an existing blue/green deployment identifier. +- `blue_green_deployment_identifier`: The unique identifier of the blue/green deployment to + delete. This parameter isn't case-sensitive. Constraints: Must match an existing + blue/green deployment identifier. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DeleteTarget"`: A value that indicates whether to delete the resources in the green - environment. You can't specify this option if the blue/green deployment status is - SWITCHOVER_COMPLETED. +- `"DeleteTarget"`: Specifies whether to delete the resources in the green environment. You + can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED. """ function delete_blue_green_deployment( BlueGreenDeploymentIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -3761,33 +3808,33 @@ end describe_blue_green_deployments() describe_blue_green_deployments(params::Dict{String,<:Any}) -Returns information about blue/green deployments. For more information, see Using Amazon -RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using -Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. +Describes one or more blue/green deployments. For more information, see Using Amazon RDS +Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon +RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BlueGreenDeploymentIdentifier"`: The blue/green deployment identifier. 
If this - parameter is specified, information from only the specific blue/green deployment is - returned. This parameter isn't case-sensitive. Constraints: If supplied, must match an - existing blue/green deployment identifier. +- `"BlueGreenDeploymentIdentifier"`: The blue/green deployment identifier. If you specify + this parameter, the response only includes information about the specific blue/green + deployment. This parameter isn't case-sensitive. Constraints: Must match an existing + blue/green deployment identifier. - `"Filters"`: A filter that specifies one or more blue/green deployments to describe. - Supported filters: blue-green-deployment-identifier - Accepts system-generated - identifiers for blue/green deployments. The results list only includes information about - the blue/green deployments with the specified identifiers. blue-green-deployment-name - - Accepts user-supplied names for blue/green deployments. The results list only includes - information about the blue/green deployments with the specified names. source - Accepts - source databases for a blue/green deployment. The results list only includes information - about the blue/green deployments with the specified source databases. target - Accepts - target databases for a blue/green deployment. The results list only includes information - about the blue/green deployments with the specified target databases. + Valid Values: blue-green-deployment-identifier - Accepts system-generated identifiers + for blue/green deployments. The results list only includes information about the blue/green + deployments with the specified identifiers. blue-green-deployment-name - Accepts + user-supplied names for blue/green deployments. The results list only includes information + about the blue/green deployments with the specified names. source - Accepts source + databases for a blue/green deployment. The results list only includes information about the + blue/green deployments with the specified source databases. target - Accepts target + databases for a blue/green deployment. The results list only includes information about the + blue/green deployments with the specified target databases. - `"Marker"`: An optional pagination token provided by a previous - DescribeBlueGreenDeployments request. If this parameter is specified, the response includes - only records beyond the marker, up to the value specified by MaxRecords. + DescribeBlueGreenDeployments request. If you specify this parameter, the response only + includes records beyond the marker, up to the value specified by MaxRecords. - `"MaxRecords"`: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included - in the response so you can retrieve the remaining results. Default: 100 Constraints: - Minimum 20, maximum 100. + in the response so you can retrieve the remaining results. Default: 100 Constraints: Must + be a minimum of 20. Can't exceed 100. """ function describe_blue_green_deployments(; aws_config::AbstractAWSConfig=global_aws_config() @@ -4183,20 +4230,20 @@ end describe_dbclusters() describe_dbclusters(params::Dict{String,<:Any}) -Returns information about Amazon Aurora DB clusters and Multi-AZ DB clusters. This API -supports pagination. For more information on Amazon Aurora DB clusters, see What is Amazon -Aurora? in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, see +Describes existing Amazon Aurora DB clusters and Multi-AZ DB clusters. 
This API supports +pagination. For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? +in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. This operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DBClusterIdentifier"`: The user-supplied DB cluster identifier or the Amazon Resource - Name (ARN) of the DB cluster. If this parameter is specified, information from only the + Name (ARN) of the DB cluster. If this parameter is specified, information for only the specific DB cluster is returned. This parameter isn't case-sensitive. Constraints: If - supplied, must match an existing DBClusterIdentifier. + supplied, must match an existing DB cluster identifier. - `"Filters"`: A filter that specifies one or more DB clusters to describe. Supported - filters: clone-group-id - Accepts clone group identifiers. The results list only + Filters: clone-group-id - Accepts clone group identifiers. The results list only includes information about the DB clusters associated with these clone groups. db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list only includes information about the DB clusters identified by these ARNs. @@ -4206,8 +4253,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys includes information about the DB clusters associated with these domains. engine - Accepts engine names. The results list only includes information about the DB clusters for these engines. -- `"IncludeShared"`: Optional Boolean parameter that specifies whether the output includes - information about clusters shared from other Amazon Web Services accounts. +- `"IncludeShared"`: Specifies whether the output includes information about clusters + shared from other Amazon Web Services accounts. - `"Marker"`: An optional pagination token provided by a previous DescribeDBClusters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -4356,27 +4403,26 @@ end describe_dbinstances() describe_dbinstances(params::Dict{String,<:Any}) -Returns information about provisioned RDS instances. This API supports pagination. This -operation can also return information for Amazon Neptune DB instances and Amazon DocumentDB -instances. +Describes provisioned RDS instances. This API supports pagination. This operation can also +return information for Amazon Neptune DB instances and Amazon DocumentDB instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DBInstanceIdentifier"`: The user-supplied instance identifier or the Amazon Resource Name (ARN) of the DB instance. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn't case-sensitive. Constraints: If - supplied, must match the identifier of an existing DBInstance. + supplied, must match the identifier of an existing DB instance. - `"Filters"`: A filter that specifies one or more DB instances to describe. Supported - filters: db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource + Filters: db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). 
The results list only includes information about the DB instances associated with the DB clusters identified by these ARNs. db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list only includes information about the DB instances identified by these ARNs. dbi-resource-id - Accepts - DB instance resource identifiers. The results list will only include information about the - DB instances identified by these DB instance resource identifiers. domain - Accepts - Active Directory directory IDs. The results list only includes information about the DB - instances associated with these domains. engine - Accepts engine names. The results list - only includes information about the DB instances for these engines. + DB instance resource identifiers. The results list only includes information about the DB + instances identified by these DB instance resource identifiers. domain - Accepts Active + Directory directory IDs. The results list only includes information about the DB instances + associated with these domains. engine - Accepts engine names. The results list only + includes information about the DB instances for these engines. - `"Marker"`: An optional pagination token provided by a previous DescribeDBInstances request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -6049,7 +6095,7 @@ end modify_dbcluster(dbcluster_identifier) modify_dbcluster(dbcluster_identifier, params::Dict{String,<:Any}) -Modify the settings for an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can +Modifies the settings of an Amazon Aurora DB cluster or a Multi-AZ DB cluster. You can change one or more settings by specifying these parameters and the new values in the request. For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, see Multi-AZ @@ -6057,102 +6103,102 @@ DB cluster deployments in the Amazon RDS User Guide. # Arguments - `dbcluster_identifier`: The DB cluster identifier for the cluster being modified. This - parameter isn't case-sensitive. Constraints: This identifier must match the identifier of - an existing DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters + parameter isn't case-sensitive. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Constraints: Must match the identifier of an existing DB cluster. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The amount of storage in gibibytes (GiB) to allocate to each DB - instance in the Multi-AZ DB cluster. Valid for: Multi-AZ DB clusters only -- `"AllowEngineModeChange"`: A value that indicates whether engine mode changes from - serverless to provisioned are allowed. Constraints: You must allow engine mode changes when - specifying a different value for the EngineMode parameter from the DB cluster's current - engine mode. Valid for: Aurora Serverless v1 DB clusters only -- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are - allowed. Constraints: You must allow major version upgrades when specifying a value for the - EngineVersion parameter that is a different major version than the DB cluster's current - version. 
Valid for: Aurora DB clusters only -- `"ApplyImmediately"`: A value that indicates whether the modifications in this request - and any pending modifications are asynchronously applied as soon as possible, regardless of - the PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, + instance in the Multi-AZ DB cluster. Valid for Cluster Type: Multi-AZ DB clusters only +- `"AllowEngineModeChange"`: Specifies whether engine mode changes from serverless to + provisioned are allowed. Valid for Cluster Type: Aurora Serverless v1 DB clusters only + Constraints: You must allow engine mode changes when specifying a different value for the + EngineMode parameter from the DB cluster's current engine mode. +- `"AllowMajorVersionUpgrade"`: Specifies whether major version upgrades are allowed. Valid + for Cluster Type: Aurora DB clusters only Constraints: You must allow major version + upgrades when specifying a value for the EngineVersion parameter that is a different major + version than the DB cluster's current version. +- `"ApplyImmediately"`: Specifies whether the modifications in this request and any pending + modifications are asynchronously applied as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the DB cluster. If this parameter is disabled, changes to the DB cluster are applied during the next maintenance window. Most modifications can be applied immediately or during the next scheduled maintenance window. Some modifications, such as turning on deletion protection and changing the master password, are applied immediately—regardless of when you choose to apply them. By - default, this parameter is disabled. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor engine upgrades are - applied automatically to the DB cluster during the maintenance window. By default, minor - engine upgrades are applied automatically. Valid for: Multi-AZ DB clusters only + default, this parameter is disabled. Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters +- `"AutoMinorVersionUpgrade"`: Specifies whether minor engine upgrades are applied + automatically to the DB cluster during the maintenance window. By default, minor engine + upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only - `"BacktrackWindow"`: The target backtrack window, in seconds. To disable backtracking, - set this value to 0. Default: 0 Constraints: If specified, this value must be set to a - number from 0 to 259,200 (72 hours). Valid for: Aurora MySQL DB clusters only + set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 + Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 + hours). - `"BackupRetentionPeriod"`: The number of days for which automated backups are retained. - Specify a minimum value of 1. Default: 1 Constraints: Must be a value from 1 to 35 - Valid for: Aurora DB clusters and Multi-AZ DB clusters + Specify a minimum value of 1. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Default: 1 Constraints: Must be a value from 1 to 35. - `"CloudwatchLogsExportConfiguration"`: The configuration setting for the log types to be - enabled for export to CloudWatch Logs for a specific DB cluster. The values in the list - depend on the DB engine being used. RDS for MySQL Possible values are error, general, and - slowquery. 
RDS for PostgreSQL Possible values are postgresql and upgrade. Aurora MySQL - Possible values are audit, error, general, and slowquery. Aurora PostgreSQL Possible - value is postgresql. For more information about exporting CloudWatch Logs for Amazon RDS, - see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. For - more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database - Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. Valid for: Aurora DB - clusters and Multi-AZ DB clusters -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB - cluster to snapshots of the DB cluster. The default is not to copy them. Valid for: Aurora - DB clusters and Multi-AZ DB clusters + enabled for export to CloudWatch Logs for a specific DB cluster. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters The following values are valid for each DB + engine: Aurora MySQL - audit | error | general | slowquery Aurora PostgreSQL - + postgresql RDS for MySQL - error | general | slowquery RDS for PostgreSQL - + postgresql | upgrade For more information about exporting CloudWatch Logs for Amazon + RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. + For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing + Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the DB cluster to + snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"DBClusterInstanceClass"`: The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS - User Guide. Valid for: Multi-AZ DB clusters only + User Guide. Valid for Cluster Type: Multi-AZ DB clusters only - `"DBClusterParameterGroupName"`: The name of the DB cluster parameter group to use for - the DB cluster. Valid for: Aurora DB clusters and Multi-AZ DB clusters + the DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"DBInstanceParameterGroupName"`: The name of the DB parameter group to apply to all instances of the DB cluster. When you apply a parameter group using the DBInstanceParameterGroupName parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. - Default: The existing name setting Constraints: The DB parameter group must be in the - same DB parameter group family as this DB cluster. The DBInstanceParameterGroupName - parameter is valid in combination with the AllowMajorVersionUpgrade parameter for a major - version upgrade only. Valid for: Aurora DB clusters only -- `"DeletionProtection"`: A value that indicates whether the DB cluster has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. Valid for: Aurora DB clusters and Multi-AZ DB + Valid for Cluster Type: Aurora DB clusters only Default: The existing name setting + Constraints: The DB parameter group must be in the same DB parameter group family as this + DB cluster. 
The DBInstanceParameterGroupName parameter is valid in combination with the + AllowMajorVersionUpgrade parameter for a major version upgrade only. +- `"DeletionProtection"`: Specifies whether the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. By default, deletion + protection isn't enabled. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"Domain"`: The Active Directory directory ID to move the DB cluster to. Specify none to remove the cluster from its current domain. The domain must be created prior to this operation. For more information, see Kerberos Authentication in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters only -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. Valid for: Aurora DB clusters only -- `"EnableGlobalWriteForwarding"`: A value that indicates whether to enable this DB cluster - to forward write operations to the primary cluster of an Aurora global database - (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that - are secondary clusters in an Aurora global database. You can set this value only on Aurora - DB clusters that are members of an Aurora global database. With this parameter enabled, a - secondary cluster can forward writes to the current primary cluster and the resulting - changes are replicated back to this cluster. For the primary DB cluster of an Aurora global - database, this value is used immediately if the primary is demoted by the - FailoverGlobalCluster API operation, but it does nothing until then. Valid for: Aurora DB - clusters only -- `"EnableHttpEndpoint"`: A value that indicates whether to enable the HTTP endpoint for an - Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. When enabled, - the HTTP endpoint provides a connectionless web service API for running SQL queries on the - Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS - console with the query editor. For more information, see Using the Data API for Aurora - Serverless v1 in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. For more information, see IAM Database Authentication in - the Amazon Aurora User Guide. Valid for: Aurora DB clusters only -- `"EnablePerformanceInsights"`: A value that indicates whether to turn on Performance - Insights for the DB cluster. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. Valid for: Multi-AZ DB clusters only + Guide. Valid for Cluster Type: Aurora DB clusters only +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. Valid for Cluster Type: Aurora DB clusters only +- `"EnableGlobalWriteForwarding"`: Specifies whether to enable this DB cluster to forward + write operations to the primary cluster of a global cluster (Aurora global database). By + default, write operations are not allowed on Aurora DB clusters that are secondary clusters + in an Aurora global database. You can set this value only on Aurora DB clusters that are + members of an Aurora global database. 
With this parameter enabled, a secondary cluster can + forward writes to the current primary cluster, and the resulting changes are replicated + back to this cluster. For the primary DB cluster of an Aurora global database, this value + is used immediately if the primary is demoted by a global cluster API operation, but it + does nothing until then. Valid for Cluster Type: Aurora DB clusters only +- `"EnableHttpEndpoint"`: Specifies whether to enable the HTTP endpoint for an Aurora + Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. When enabled, the HTTP + endpoint provides a connectionless web service API for running SQL queries on the Aurora + Serverless v1 DB cluster. You can also query your database from inside the RDS console with + the query editor. For more information, see Using the Data API for Aurora Serverless v1 in + the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon + Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only +- `"EnablePerformanceInsights"`: Specifies whether to turn on Performance Insights for the + DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. Valid for Cluster Type: Multi-AZ DB clusters only - `"EngineMode"`: The DB engine mode of the DB cluster, either provisioned or serverless. The DB engine mode can be modified only from serverless to provisioned. For more - information, see CreateDBCluster. Valid for: Aurora DB clusters only + information, see CreateDBCluster. Valid for Cluster Type: Aurora DB clusters only - `"EngineVersion"`: The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled. If the cluster that you're @@ -6166,28 +6212,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for MySQL, use the following command: aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\" To list all of the available engine versions for RDS for PostgreSQL, use the following command: aws rds describe-db-engine-versions - --engine postgres --query \"DBEngineVersions[].EngineVersion\" Valid for: Aurora DB - clusters and Multi-AZ DB clusters + --engine postgres --query \"DBEngineVersions[].EngineVersion\" Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters - `"Iops"`: The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. - Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster. - Valid for: Multi-AZ DB clusters only -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. If the DB cluster doesn't manage the - master user password with Amazon Web Services Secrets Manager, you can turn on this - management. In this case, you can't specify MasterUserPassword. 
If the DB cluster already - manages the master user password with Amazon Web Services Secrets Manager, and you specify - that the master user password is not managed with Amazon Web Services Secrets Manager, then - you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new - password for the master user specified by MasterUserPassword. For more information, see - Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide - and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters -- `"MasterUserPassword"`: The new password for the master database user. This password can - contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must - contain from 8 to 41 characters. Can't be specified if ManageMasterUserPassword is turned - on. Valid for: Aurora DB clusters and Multi-AZ DB clusters + Valid for Cluster Type: Multi-AZ DB clusters only Constraints: Must be a multiple between + .5 and 50 of the storage amount for the DB cluster. +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. If the DB cluster doesn't manage the master user + password with Amazon Web Services Secrets Manager, you can turn on this management. In this + case, you can't specify MasterUserPassword. If the DB cluster already manages the master + user password with Amazon Web Services Secrets Manager, and you specify that the master + user password is not managed with Amazon Web Services Secrets Manager, then you must + specify MasterUserPassword. In this case, RDS deletes the secret and uses the new password + for the master user specified by MasterUserPassword. For more information, see Password + management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and + Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User + Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters +- `"MasterUserPassword"`: The new password for the master database user. Valid for Cluster + Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 8 to 41 + characters. Can contain any printable ASCII character except \"/\", \"\"\", or \"@\". + Can't be specified if ManageMasterUserPassword is turned on. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The DB cluster @@ -6203,81 +6249,82 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon - Web Services Region. Valid for: Aurora DB clusters and Multi-AZ DB clusters + Web Services Region. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring - metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, also set - MonitoringInterval to a value other than 0. 
Valid Values: 0, 1, 5, 10, 15, 30, 60 Valid - for: Multi-AZ DB clusters only + metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a + value other than 0. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 0 | 1 | + 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. Valid - for: Multi-AZ DB clusters only -- `"NetworkType"`: The network type of the DB cluster. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB cluster. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - For more information, see Working with a DB instance in a VPC in the Amazon Aurora User - Guide. Valid for: Aurora DB clusters only + for Cluster Type: Multi-AZ DB clusters only +- `"NetworkType"`: The network type of the DB cluster. The network type is determined by + the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB + clusters only Valid Values: IPV4 | DUAL - `"NewDBClusterIdentifier"`: The new DB cluster identifier for the DB cluster when - renaming a DB cluster. This value is stored as a lowercase string. Constraints: Must - contain from 1 to 63 letters, numbers, or hyphens The first character must be a letter - Can't end with a hyphen or contain two consecutive hyphens Example: my-cluster2 Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"OptionGroupName"`: A value that indicates that the DB cluster should be associated with - the specified option group. DB clusters are associated with a default option group that - can't be modified. + renaming a DB cluster. This value is stored as a lowercase string. Valid for Cluster Type: + Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 + letters, numbers, or hyphens. The first character must be a letter. Can't end with a + hyphen or contain two consecutive hyphens. Example: my-cluster2 +- `"OptionGroupName"`: The option group to associate the DB cluster with. DB clusters are + associated with a default option group that can't be modified. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a - different default KMS key for each Amazon Web Services Region. Valid for: Multi-AZ DB - clusters only -- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. 
The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. Valid for: + different default KMS key for each Amazon Web Services Region. Valid for Cluster Type: Multi-AZ DB clusters only -- `"Port"`: The port number on which the DB cluster accepts connections. Constraints: Value - must be 1150-65535 Default: The same port as the original DB cluster. Valid for: Aurora DB - clusters only +- `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights + data. Valid for Cluster Type: Multi-AZ DB clusters only Valid Values: 7 month * 31, + where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * + 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that + isn't valid, such as 94, Amazon RDS issues an error. +- `"Port"`: The port number on which the DB cluster accepts connections. Valid for Cluster + Type: Aurora DB clusters only Valid Values: 1150-65535 Default: The same port as the + original DB cluster. - `"PreferredBackupWindow"`: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the - Amazon Aurora User Guide. Constraints: Must be in the format hh24:mi-hh24:mi. Must be - in Universal Coordinated Time (UTC). Must not conflict with the preferred maintenance - window. Must be at least 30 minutes. Valid for: Aurora DB clusters and Multi-AZ DB - clusters + Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Constraints: Must be in the format hh24:mi-hh24:mi. Must be in Universal + Coordinated Time (UTC). Must not conflict with the preferred maintenance window. Must + be at least 30 minutes. - `"PreferredMaintenanceWindow"`: The weekly time range during which system maintenance can - occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi The default is - a 30-minute window selected at random from an 8-hour block of time for each Amazon Web - Services Region, occurring on a random day of the week. To see the time blocks available, - see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. - Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. Constraints: Minimum 30-minute window. Valid - for: Aurora DB clusters and Multi-AZ DB clusters -- `"RotateMasterUserPassword"`: A value that indicates whether to rotate the secret managed - by Amazon Web Services Secrets Manager for the master user password. This setting is valid - only if the master user password is managed by RDS in Amazon Web Services Secrets Manager - for the DB cluster. The secret value contains the updated password. For more information, - see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User - Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora - User Guide. Constraints: You must apply the change immediately when rotating the master - user password. Valid for: Aurora DB clusters and Multi-AZ DB clusters + occur, in Universal Coordinated Time (UTC). 
Valid for Cluster Type: Aurora DB clusters and + Multi-AZ DB clusters The default is a 30-minute window selected at random from an 8-hour + block of time for each Amazon Web Services Region, occurring on a random day of the week. + To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance + Window in the Amazon Aurora User Guide. Constraints: Must be in the format + ddd:hh24:mi-ddd:hh24:mi. Days must be one of Mon | Tue | Wed | Thu | Fri | Sat | Sun. + Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes. +- `"RotateMasterUserPassword"`: Specifies whether to rotate the secret managed by Amazon + Web Services Secrets Manager for the master user password. This setting is valid only if + the master user password is managed by RDS in Amazon Web Services Secrets Manager for the + DB cluster. The secret value contains the updated password. For more information, see + Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide + and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User + Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: + You must apply the change immediately when rotating the master user password. - `"ScalingConfiguration"`: The scaling properties of the DB cluster. You can only modify - scaling properties for DB clusters in serverless DB engine mode. Valid for: Aurora DB - clusters only + scaling properties for DB clusters in serverless DB engine mode. Valid for Cluster Type: + Aurora DB clusters only - `"ServerlessV2ScalingConfiguration"`: -- `"StorageType"`: Specifies the storage type to be associated with the DB cluster. When - specified for a Multi-AZ DB cluster, a value for the Iops parameter is required. Valid - values: aurora, aurora-iopt1 (Aurora DB clusters); io1 (Multi-AZ DB clusters) Default: - aurora (Aurora DB clusters); io1 (Multi-AZ DB clusters) Valid for: Aurora DB clusters and - Multi-AZ DB clusters -- `"VpcSecurityGroupIds"`: A list of VPC security groups that the DB cluster will belong - to. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"StorageType"`: The storage type to associate with the DB cluster. For information on + storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB + clusters. For information on storage types for Multi-AZ DB clusters, see Settings for + creating Multi-AZ DB clusters. When specified for a Multi-AZ DB cluster, a value for the + Iops parameter is required. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB + clusters Valid Values: Aurora DB clusters - aurora | aurora-iopt1 Multi-AZ DB clusters + - io1 Default: Aurora DB clusters - aurora Multi-AZ DB clusters - io1 +- `"VpcSecurityGroupIds"`: A list of EC2 VPC security groups to associate with this DB + cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters """ function modify_dbcluster( DBClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -6533,43 +6580,44 @@ modifications you can make to your DB instance, call DescribeValidDBInstanceModi before you call ModifyDBInstance. # Arguments -- `dbinstance_identifier`: The DB instance identifier. This value is stored as a lowercase - string. Constraints: Must match the identifier of an existing DBInstance. +- `dbinstance_identifier`: The identifier of DB instance to modify. This value is stored as + a lowercase string. Constraints: Must match the identifier of an existing DB instance. 
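Editorial illustration (not part of the generated diff): a minimal sketch of how the modify_dbcluster wrapper defined just above might be called, with optional settings supplied through the params::Dict{String,<:Any} form its docstring describes. It assumes the package's high-level @service bindings and the default global_aws_config(); the cluster identifier, role ARN, and maintenance window below are hypothetical.

using AWS
@service RDS  # assumes the high-level service bindings generated by AWS.jl

# Hypothetical identifiers and values; optional settings travel in the params Dict.
RDS.modify_dbcluster(
    "my-multiaz-cluster",
    Dict{String,Any}(
        "MonitoringInterval" => 60,                                       # Enhanced Monitoring every 60 seconds
        "MonitoringRoleArn" => "arn:aws:iam::123456789012:role/emaccess", # placeholder monitoring role ARN
        "PreferredMaintenanceWindow" => "sun:05:00-sun:05:30",            # ddd:hh24:mi-ddd:hh24:mi format
    ),
)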
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AllocatedStorage"`: The new amount of storage in gibibytes (GiB) to allocate for the DB - instance. For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least - 10% greater than the current value. Values that are not at least 10% greater than the - existing value are rounded up so that they are 10% greater than the current value. For the - valid values for allocated storage for each engine, see CreateDBInstance. -- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are - allowed. Changing this parameter doesn't result in an outage and the change is - asynchronously applied as soon as possible. This setting doesn't apply to RDS Custom. - Constraints: Major version upgrades must be allowed when specifying a value for the - EngineVersion parameter that is a different major version than the DB instance's current + instance. For RDS for MariaDB, RDS for MySQL, RDS for Oracle, and RDS for PostgreSQL, the + value supplied must be at least 10% greater than the current value. Values that are not at + least 10% greater than the existing value are rounded up so that they are 10% greater than + the current value. For the valid values for allocated storage for each engine, see + CreateDBInstance. +- `"AllowMajorVersionUpgrade"`: Specifies whether major version upgrades are allowed. + Changing this parameter doesn't result in an outage and the change is asynchronously + applied as soon as possible. This setting doesn't apply to RDS Custom DB instances. + Constraints: Major version upgrades must be allowed when specifying a value for the + EngineVersion parameter that's a different major version than the DB instance's current version. -- `"ApplyImmediately"`: A value that indicates whether the modifications in this request - and any pending modifications are asynchronously applied as soon as possible, regardless of - the PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is +- `"ApplyImmediately"`: Specifies whether the modifications in this request and any pending + modifications are asynchronously applied as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is disabled. If this parameter is disabled, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance in the Amazon RDS User Guide to see the impact of enabling or disabling ApplyImmediately for each modified parameter and to determine when the changes are applied. -- `"AutoMinorVersionUpgrade"`: A value that indicates whether minor version upgrades are - applied automatically to the DB instance during the maintenance window. An outage occurs - when all the following conditions are met: The automatic upgrade is enabled for the - maintenance window. A newer minor version is available. RDS has enabled automatic - patching for the engine version. If any of the preceding conditions isn't met, RDS - applies the change as soon as possible and doesn't cause an outage. For an RDS Custom DB - instance, set AutoMinorVersionUpgrade to false. Otherwise, the operation returns an error. -- `"AutomationMode"`: The automation mode of the RDS Custom DB instance: full or all - paused. 
If full, the DB instance automates monitoring and instance recovery. If all paused, - the instance pauses automation for the duration set by ResumeFullAutomationModeMinutes. +- `"AutoMinorVersionUpgrade"`: Specifies whether minor version upgrades are applied + automatically to the DB instance during the maintenance window. An outage occurs when all + the following conditions are met: The automatic upgrade is enabled for the maintenance + window. A newer minor version is available. RDS has enabled automatic patching for the + engine version. If any of the preceding conditions isn't met, Amazon RDS applies the + change as soon as possible and doesn't cause an outage. For an RDS Custom DB instance, + don't enable this setting. Otherwise, the operation returns an error. +- `"AutomationMode"`: The automation mode of the RDS Custom DB instance. If full, the DB + instance automates monitoring and instance recovery. If all paused, the instance pauses + automation for the duration set by ResumeFullAutomationModeMinutes. - `"AwsBackupRecoveryPointArn"`: The Amazon Resource Name (ARN) of the recovery point in - Amazon Web Services Backup. This setting doesn't apply to RDS Custom. + Amazon Web Services Backup. This setting doesn't apply to RDS Custom DB instances. - `"BackupRetentionPeriod"`: The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. Enabling and disabling backups can result in a brief I/O suspension @@ -6577,38 +6625,36 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance. These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously applied as soon - as possible. Amazon Aurora Not applicable. The retention period for automated backups is - managed by the DB cluster. For more information, see ModifyDBCluster. Default: Uses - existing setting Constraints: It must be a value from 0 to 35. It can't be set to 0 if - the DB instance is a source to read replicas. It can't be set to 0 for an RDS Custom for - Oracle DB instance. It can be specified for a MySQL read replica only if the source is - running MySQL 5.6 or later. It can be specified for a PostgreSQL read replica only if the - source is running PostgreSQL 9.3.5. -- `"CACertificateIdentifier"`: Specifies the CA certificate identifier to use for the DB - instance’s server certificate. This setting doesn't apply to RDS Custom. For more + as possible. This setting doesn't apply to Amazon Aurora DB instances. The retention period + for automated backups is managed by the DB cluster. For more information, see + ModifyDBCluster. Default: Uses existing setting Constraints: Must be a value from 0 to + 35. Can't be set to 0 if the DB instance is a source to read replicas. Can't be set to + 0 for an RDS Custom for Oracle DB instance. +- `"CACertificateIdentifier"`: The CA certificate identifier to use for the DB instance's + server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide. 
-- `"CertificateRotationRestart"`: A value that indicates whether the DB instance is - restarted when you rotate your SSL/TLS certificate. By default, the DB instance is - restarted when you rotate your SSL/TLS certificate. The certificate is not updated until - the DB instance is restarted. Set this parameter only if you are not using SSL/TLS to - connect to the DB instance. If you are using SSL/TLS to connect to the DB instance, follow - the appropriate instructions for your DB engine to rotate your SSL/TLS certificate: For - more information about rotating your SSL/TLS certificate for RDS DB engines, see Rotating - Your SSL/TLS Certificate. in the Amazon RDS User Guide. For more information about - rotating your SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS - Certificate in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom. -- `"CloudwatchLogsExportConfiguration"`: The configuration setting for the log types to be - enabled for export to CloudWatch Logs for a specific DB instance. A change to the +- `"CertificateRotationRestart"`: Specifies whether the DB instance is restarted when you + rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate + your SSL/TLS certificate. The certificate is not updated until the DB instance is + restarted. Set this parameter only if you are not using SSL/TLS to connect to the DB + instance. If you are using SSL/TLS to connect to the DB instance, follow the appropriate + instructions for your DB engine to rotate your SSL/TLS certificate: For more information + about rotating your SSL/TLS certificate for RDS DB engines, see Rotating Your SSL/TLS + Certificate. in the Amazon RDS User Guide. For more information about rotating your + SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS Certificate in the + Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. +- `"CloudwatchLogsExportConfiguration"`: The log types to be enabled for export to + CloudWatch Logs for a specific DB instance. A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect. This setting doesn't - apply to RDS Custom. -- `"CopyTagsToSnapshot"`: A value that indicates whether to copy all tags from the DB - instance to snapshots of the DB instance. By default, tags are not copied. Amazon Aurora - Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value - for an Aurora DB instance has no effect on the DB cluster setting. For more information, - see ModifyDBCluster. + apply to RDS Custom DB instances. +- `"CopyTagsToSnapshot"`: Specifies whether to copy all tags from the DB instance to + snapshots of the DB instance. By default, tags aren't copied. This setting doesn't apply to + Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting + this value for an Aurora DB instance has no effect on the DB cluster setting. For more + information, see ModifyDBCluster. - `"DBInstanceClass"`: The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for @@ -6624,58 +6670,73 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys instance without failover. 
In this case, the DB instance isn't rebooted automatically, and the parameter changes aren't applied during the next maintenance window. However, if you modify dynamic parameters in the newly associated DB parameter group, these changes are - applied immediately without a reboot. This setting doesn't apply to RDS Custom. Default: - Uses existing setting Constraints: The DB parameter group must be in the same DB parameter + applied immediately without a reboot. This setting doesn't apply to RDS Custom DB + instances. Default: Uses existing setting Constraints: Must be in the same DB parameter group family as the DB instance. - `"DBPortNumber"`: The port number on which the database accepts connections. The value of the DBPortNumber parameter must not match any of the port values specified for options in the option group for the DB instance. If you change the DBPortNumber value, your database restarts regardless of the value of the ApplyImmediately parameter. This setting doesn't - apply to RDS Custom. MySQL Default: 3306 Valid values: 1150-65535 MariaDB Default: - 3306 Valid values: 1150-65535 PostgreSQL Default: 5432 Valid values: 1150-65535 Type: - Integer Oracle Default: 1521 Valid values: 1150-65535 SQL Server Default: 1433 Valid - values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156. Amazon - Aurora Default: 3306 Valid values: 1150-65535 + apply to RDS Custom DB instances. Valid Values: 1150-65535 Default: Amazon Aurora - 3306 + RDS for MariaDB - 3306 RDS for Microsoft SQL Server - 1433 RDS for MySQL - 3306 + RDS for Oracle - 1521 RDS for PostgreSQL - 5432 Constraints: For RDS for Microsoft + SQL Server, the value can't be 1234, 1434, 3260, 3343, 3389, 47001, or 49152-49156. - `"DBSecurityGroups"`: A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied - as soon as possible. This setting doesn't apply to RDS Custom. Constraints: If supplied, - must match existing DBSecurityGroups. + as soon as possible. This setting doesn't apply to RDS Custom DB instances. Constraints: + If supplied, must match existing DB security groups. - `"DBSubnetGroupName"`: The new DB subnet group for the DB instance. You can use this parameter to move your DB instance to a different VPC. If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC. For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. Changing the subnet group causes an outage during the change. The change is applied during the next maintenance - window, unless you enable ApplyImmediately. This parameter doesn't apply to RDS Custom. - Constraints: If supplied, must match the name of an existing DBSubnetGroup. Example: + window, unless you enable ApplyImmediately. This parameter doesn't apply to RDS Custom DB + instances. Constraints: If supplied, must match existing DB subnet group. Example: mydbsubnetgroup -- `"DeletionProtection"`: A value that indicates whether the DB instance has deletion - protection enabled. The database can't be deleted when deletion protection is enabled. By - default, deletion protection isn't enabled. For more information, see Deleting a DB - Instance. +- `"DeletionProtection"`: Specifies whether the DB instance has deletion protection + enabled. The database can't be deleted when deletion protection is enabled. By default, + deletion protection isn't enabled. For more information, see Deleting a DB Instance. 
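Editorial illustration (not part of the generated diff): a hedged sketch of passing the boolean-style optional parameters documented above to modify_dbinstance via its two-argument params form. The instance identifier and values are placeholders, and the example assumes the high-level @service bindings.

using AWS
@service RDS

# Hypothetical instance: keep 7 days of automated backups, turn on deletion
# protection, and apply both changes immediately rather than waiting for the
# next maintenance window.
RDS.modify_dbinstance(
    "mydbinstance",
    Dict{String,Any}(
        "BackupRetentionPeriod" => 7,
        "DeletionProtection" => true,
        "ApplyImmediately" => true,
    ),
)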
+- `"DisableDomain"`: Specifies whether to remove the DB instance from the Active Directory + domain. - `"Domain"`: The Active Directory directory ID to move the DB instance to. Specify none to remove the instance from its current domain. You must create the domain before this operation. Currently, you can create only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. For more information, see Kerberos - Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB + instances. +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain - `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the - Directory Service. This setting doesn't apply to RDS Custom. -- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP - address (CoIP) for an RDS on Outposts DB instance. A CoIP provides local or external - connectivity to resources in your Outpost subnets through your on-premises network. For - some use cases, a CoIP can provide lower latency for connections to the DB instance from - outside of its virtual private cloud (VPC) on your local network. For more information - about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the - Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in - the Amazon Web Services Outposts User Guide. -- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of - Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By - default, mapping isn't enabled. This setting doesn't apply to Amazon Aurora. Mapping Amazon - Web Services IAM accounts to database accounts is managed by the DB cluster. For more + Directory Service. This setting doesn't apply to RDS Custom DB instances. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain +- `"EnableCustomerOwnedIp"`: Specifies whether to enable a customer-owned IP address (CoIP) + for an RDS on Outposts DB instance. A CoIP provides local or external connectivity to + resources in your Outpost subnets through your on-premises network. For some use cases, a + CoIP can provide lower latency for connections to the DB instance from outside of its + virtual private cloud (VPC) on your local network. For more information about RDS on + Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS + User Guide. 
For more information about CoIPs, see Customer-owned IP addresses in the Amazon + Web Services Outposts User Guide. +- `"EnableIAMDatabaseAuthentication"`: Specifies whether to enable mapping of Amazon Web + Services Identity and Access Management (IAM) accounts to database accounts. By default, + mapping isn't enabled. This setting doesn't apply to Amazon Aurora. Mapping Amazon Web + Services IAM accounts to database accounts is managed by the DB cluster. For more information about IAM database authentication, see IAM Database Authentication for MySQL - and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"EnablePerformanceInsights"`: A value that indicates whether to enable Performance - Insights for the DB instance. For more information, see Using Amazon Performance Insights - in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. + and PostgreSQL in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom DB + instances. +- `"EnablePerformanceInsights"`: Specifies whether to enable Performance Insights for the + DB instance. For more information, see Using Amazon Performance Insights in the Amazon RDS + User Guide. This setting doesn't apply to RDS Custom DB instances. - `"Engine"`: The target Oracle DB engine when you convert a non-CDB to a CDB. This intermediate step is necessary to upgrade an Oracle Database 19c non-CDB to an Oracle Database 21c CDB. Note the following requirements: Make sure that you specify @@ -6693,10 +6754,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family. If you specify only - a major version, Amazon RDS will update the DB instance to the default minor version if the + a major version, Amazon RDS updates the DB instance to the default minor version if the current minor version is lower. For information about valid engine versions, see CreateDBInstance, or call DescribeDBEngineVersions. If the instance that you're modifying - is acting as a read replica, the engine version that you specify must be the same or later + is acting as a read replica, the engine version that you specify must be the same or higher than the version that the source DB instance or cluster is running. In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the PATCH_DB_FAILURE lifecycle. @@ -6715,39 +6776,41 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a read replica for the instance, and creating - a DB snapshot of the instance. Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the - value supplied must be at least 10% greater than the current value. Values that are not at - least 10% greater than the existing value are rounded up so that they are 10% greater than - the current value. Default: Uses existing setting + a DB snapshot of the instance. Constraints: For RDS for MariaDB, RDS for MySQL, RDS for + Oracle, and RDS for PostgreSQL - The value supplied must be at least 10% greater than the + current value. 
Values that are not at least 10% greater than the existing value are rounded + up so that they are 10% greater than the current value. Default: Uses existing setting - `"LicenseModel"`: The license model for the DB instance. This setting doesn't apply to - RDS Custom. Valid values: license-included | bring-your-own-license | - general-public-license -- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user - password with Amazon Web Services Secrets Manager. If the DB instance doesn't manage the - master user password with Amazon Web Services Secrets Manager, you can turn on this - management. In this case, you can't specify MasterUserPassword. If the DB instance already - manages the master user password with Amazon Web Services Secrets Manager, and you specify - that the master user password is not managed with Amazon Web Services Secrets Manager, then - you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new + Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for MariaDB - + general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL + - general-public-license RDS for Oracle - bring-your-own-license | license-included + RDS for PostgreSQL - postgresql-license +- `"ManageMasterUserPassword"`: Specifies whether to manage the master user password with + Amazon Web Services Secrets Manager. If the DB instance doesn't manage the master user + password with Amazon Web Services Secrets Manager, you can turn on this management. In this + case, you can't specify MasterUserPassword. If the DB instance already manages the master + user password with Amazon Web Services Secrets Manager, and you specify that the master + user password is not managed with Amazon Web Services Secrets Manager, then you must + specify MasterUserPassword. In this case, Amazon RDS deletes the secret and uses the new password for the master user specified by MasterUserPassword. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. -- `"MasterUserPassword"`: The new password for the master user. The password can include - any printable ASCII character except \"/\", \"\"\", or \"@\". Changing this parameter +- `"MasterUserPassword"`: The new password for the master user. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword - element exists in the PendingModifiedValues element of the operation response. This setting - doesn't apply to RDS Custom. Amazon Aurora Not applicable. The password for the master - user is managed by the DB cluster. For more information, see ModifyDBCluster. Default: Uses - existing setting Constraints: Can't be specified if ManageMasterUserPassword is turned on. - MariaDB Constraints: Must contain from 8 to 41 characters. Microsoft SQL Server - Constraints: Must contain from 8 to 128 characters. MySQL Constraints: Must contain from - 8 to 41 characters. Oracle Constraints: Must contain from 8 to 30 characters. PostgreSQL - Constraints: Must contain from 8 to 128 characters. Amazon RDS API operations never - return the password, so this action provides a way to regain access to a primary instance - user if the password is lost. 
This includes restoring privileges that might have been - accidentally revoked. + element exists in the PendingModifiedValues element of the operation response. Amazon RDS + API operations never return the password, so this action provides a way to regain access to + a primary instance user if the password is lost. This includes restoring privileges that + might have been accidentally revoked. This setting doesn't apply to the following DB + instances: Amazon Aurora (The password for the master user is managed by the DB cluster. + For more information, see ModifyDBCluster.) RDS Custom Default: Uses existing setting + Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include + any printable ASCII character except \"/\", \"\"\", or \"@\". Length Constraints: RDS + for MariaDB - Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must + contain from 8 to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. + RDS for Oracle - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain + from 8 to 128 characters. - `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if both of the following conditions are met: The DB instance @@ -6768,94 +6831,93 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to - RDS Custom. + RDS Custom DB instances. - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring - metrics are collected for the DB instance. To disable collecting Enhanced Monitoring - metrics, specify 0, which is the default. If MonitoringRoleArn is specified, set - MonitoringInterval to a value other than 0. This setting doesn't apply to RDS Custom. Valid - Values: 0, 1, 5, 10, 15, 30, 60 + metrics are collected for the DB instance. To disable collection of Enhanced Monitoring + metrics, specify 0. If MonitoringRoleArn is specified, set MonitoringInterval to a value + other than 0. This setting doesn't apply to RDS Custom DB instances. Valid Values: 0 | 1 | + 5 | 10 | 15 | 30 | 60 Default: 0 - `"MonitoringRoleArn"`: The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see To create an IAM role for Amazon RDS Enhanced Monitoring in the Amazon RDS User Guide. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. This - setting doesn't apply to RDS Custom. -- `"MultiAZ"`: A value that indicates whether the DB instance is a Multi-AZ deployment. - Changing this parameter doesn't result in an outage. The change is applied during the next - maintenance window unless the ApplyImmediately parameter is enabled for this request. This - setting doesn't apply to RDS Custom. -- `"NetworkType"`: The network type of the DB instance. Valid values: IPV4 DUAL - The network type is determined by the DBSubnetGroup specified for the DB instance. A - DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). 
- For more information, see Working with a DB instance in a VPC in the Amazon RDS User - Guide. -- `"NewDBInstanceIdentifier"`: The new DB instance identifier for the DB instance when - renaming a DB instance. When you change the DB instance identifier, an instance reboot - occurs immediately if you enable ApplyImmediately, or will occur during the next - maintenance window if you disable Apply Immediately. This value is stored as a lowercase - string. This setting doesn't apply to RDS Custom. Constraints: Must contain from 1 to 63 - letters, numbers, or hyphens. The first character must be a letter. Can't end with a - hyphen or contain two consecutive hyphens. Example: mydbinstance -- `"OptionGroupName"`: A value that indicates the DB instance should be associated with the - specified option group. Changing this parameter doesn't result in an outage, with one - exception. If the parameter change results in an option group that enables OEM, it can - cause a brief period, lasting less than a second, during which new connections are rejected - but existing connections aren't interrupted. The change is applied during the next - maintenance window unless the ApplyImmediately parameter is enabled for this request. - Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be - removed from an option group, and that option group can't be removed from a DB instance - after it is associated with a DB instance. This setting doesn't apply to RDS Custom. + setting doesn't apply to RDS Custom DB instances. +- `"MultiAZ"`: Specifies whether the DB instance is a Multi-AZ deployment. Changing this + parameter doesn't result in an outage. The change is applied during the next maintenance + window unless the ApplyImmediately parameter is enabled for this request. This setting + doesn't apply to RDS Custom DB instances. +- `"NetworkType"`: The network type of the DB instance. The network type is determined by + the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 + protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with + a DB instance in a VPC in the Amazon RDS User Guide. Valid Values: IPV4 | DUAL +- `"NewDBInstanceIdentifier"`: The new identifier for the DB instance when renaming a DB + instance. When you change the DB instance identifier, an instance reboot occurs immediately + if you enable ApplyImmediately, or will occur during the next maintenance window if you + disable ApplyImmediately. This value is stored as a lowercase string. This setting doesn't + apply to RDS Custom DB instances. Constraints: Must contain from 1 to 63 letters, + numbers, or hyphens. The first character must be a letter. Can't end with a hyphen or + contain two consecutive hyphens. Example: mydbinstance +- `"OptionGroupName"`: The option group to associate the DB instance with. Changing this + parameter doesn't result in an outage, with one exception. If the parameter change results + in an option group that enables OEM, it can cause a brief period, lasting less than a + second, during which new connections are rejected but existing connections aren't + interrupted. The change is applied during the next maintenance window unless the + ApplyImmediately parameter is enabled for this request. Permanent options, such as the TDE + option for Oracle Advanced Security TDE, can't be removed from an option group, and that + option group can't be removed from a DB instance after it is associated with a DB instance. 
+ This setting doesn't apply to RDS Custom DB instances. - `"PerformanceInsightsKMSKeyId"`: The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the - key ARN, key ID, alias ARN, or alias name for the KMS key. If you do not specify a value - for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a - default KMS key for your Amazon Web Services account. Your Amazon Web Services account has - a different default KMS key for each Amazon Web Services Region. This setting doesn't apply - to RDS Custom. + key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for + PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default + KMS key for your Amazon Web Services account. Your Amazon Web Services account has a + different default KMS key for each Amazon Web Services Region. This setting doesn't apply + to RDS Custom DB instances. - `"PerformanceInsightsRetentionPeriod"`: The number of days to retain Performance Insights - data. The default is 7 days. The following values are valid: 7 month * 31, where month - is a number of months from 1-23 731 For example, the following values are valid: 93 - (3 months * 31) 341 (11 months * 31) 589 (19 months * 31) 731 If you specify a - retention period such as 94, which isn't a valid value, RDS issues an error. This setting - doesn't apply to RDS Custom. + data. This setting doesn't apply to RDS Custom DB instances. Valid Values: 7 month * + 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 + months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention + period that isn't valid, such as 94, Amazon RDS returns an error. - `"PreferredBackupWindow"`: The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod parameter. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more - information, see Backup window in the Amazon RDS User Guide. Amazon Aurora Not - applicable. The daily time range for creating automated backups is managed by the DB - cluster. For more information, see ModifyDBCluster. Constraints: Must be in the format - hh24:mi-hh24:mi Must be in Universal Time Coordinated (UTC) Must not conflict with the - preferred maintenance window Must be at least 30 minutes -- `"PreferredMaintenanceWindow"`: The weekly time range (in UTC) during which system - maintenance can occur, which might result in an outage. Changing this parameter doesn't - result in an outage, except in the following situation, and the change is asynchronously - applied as soon as possible. If there are pending actions that cause a reboot, and the - maintenance window is changed to include the current time, then changing this parameter - will cause a reboot of the DB instance. If moving this window to the current time, there - must be at least 30 minutes between the current time and end of the window to ensure - pending changes are applied. For more information, see Amazon RDS Maintenance Window in the - Amazon RDS User Guide. 
Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi - Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes + information, see Backup window in the Amazon RDS User Guide. This setting doesn't apply to + Amazon Aurora DB instances. The daily time range for creating automated backups is managed + by the DB cluster. For more information, see ModifyDBCluster. Constraints: Must be in the + format hh24:mi-hh24:mi. Must be in Universal Coordinated Time (UTC). Must not conflict + with the preferred maintenance window. Must be at least 30 minutes. +- `"PreferredMaintenanceWindow"`: The weekly time range during which system maintenance can + occur, which might result in an outage. Changing this parameter doesn't result in an + outage, except in the following situation, and the change is asynchronously applied as soon + as possible. If there are pending actions that cause a reboot, and the maintenance window + is changed to include the current time, then changing this parameter causes a reboot of the + DB instance. If you change this window to the current time, there must be at least 30 + minutes between the current time and end of the window to ensure pending changes are + applied. For more information, see Amazon RDS Maintenance Window in the Amazon RDS User + Guide. Default: Uses existing setting Constraints: Must be in the format + ddd:hh24:mi-ddd:hh24:mi. The day values must be mon | tue | wed | thu | fri | sat | sun. + Must be in Universal Coordinated Time (UTC). Must not conflict with the preferred + backup window. Must be at least 30 minutes. - `"ProcessorFeatures"`: The number of CPU cores and the number of threads per core for the - DB instance class of the DB instance. This setting doesn't apply to RDS Custom. -- `"PromotionTier"`: A value that specifies the order in which an Aurora Replica is - promoted to the primary instance after a failure of the existing primary instance. For more - information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. - This setting doesn't apply to RDS Custom. Default: 1 Valid Values: 0 - 15 -- `"PubliclyAccessible"`: A value that indicates whether the DB instance is publicly - accessible. When the DB cluster is publicly accessible, its Domain Name System (DNS) - endpoint resolves to the private IP address from within the DB cluster's virtual private - cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. - Access to the DB cluster is ultimately controlled by the security group it uses. That - public access isn't permitted if the security group assigned to the DB cluster doesn't - permit it. When the DB instance isn't publicly accessible, it is an internal DB instance - with a DNS name that resolves to a private IP address. PubliclyAccessible only applies to - DB instances in a VPC. The DB instance must be part of a public subnet and - PubliclyAccessible must be enabled for it to be publicly accessible. Changes to the - PubliclyAccessible parameter are applied immediately regardless of the value of the - ApplyImmediately parameter. + DB instance class of the DB instance. This setting doesn't apply to RDS Custom DB instances. +- `"PromotionTier"`: The order of priority in which an Aurora Replica is promoted to the + primary instance after a failure of the existing primary instance. For more information, + see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting + doesn't apply to RDS Custom DB instances. 
Default: 1 Valid Values: 0 - 15 +- `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When + the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to + the private IP address from within the DB cluster's virtual private cloud (VPC). It + resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB + cluster is ultimately controlled by the security group it uses. That public access isn't + permitted if the security group assigned to the DB cluster doesn't permit it. When the DB + instance isn't publicly accessible, it is an internal DB instance with a DNS name that + resolves to a private IP address. PubliclyAccessible only applies to DB instances in a + VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled + for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied + immediately regardless of the value of the ApplyImmediately parameter. - `"ReplicaMode"`: A value that sets the open mode of a replica database to either mounted or read-only. Currently, this parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for @@ -6863,46 +6925,47 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide. This setting - doesn't apply to RDS Custom. + doesn't apply to RDS Custom DB instances. - `"ResumeFullAutomationModeMinutes"`: The number of minutes to pause the automation. When - the time period ends, RDS Custom resumes full automation. The minimum value is 60 - (default). The maximum value is 1,440. -- `"RotateMasterUserPassword"`: A value that indicates whether to rotate the secret managed - by Amazon Web Services Secrets Manager for the master user password. This setting is valid - only if the master user password is managed by RDS in Amazon Web Services Secrets Manager - for the DB cluster. The secret value contains the updated password. For more information, - see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User - Guide. Constraints: You must apply the change immediately when rotating the master user + the time period ends, RDS Custom resumes full automation. Default: 60 Constraints: Must + be at least 60. Must be no more than 1,440. +- `"RotateMasterUserPassword"`: Specifies whether to rotate the secret managed by Amazon + Web Services Secrets Manager for the master user password. This setting is valid only if + the master user password is managed by RDS in Amazon Web Services Secrets Manager for the + DB cluster. The secret value contains the updated password. For more information, see + Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. + Constraints: You must apply the change immediately when rotating the master user password. -- `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This - setting applies only to the gp3 storage type. This setting doesn't apply to RDS Custom or - Amazon Aurora. -- `"StorageType"`: Specifies the storage type to be associated with the DB instance. If you - specify Provisioned IOPS (io1), you must also include a value for the Iops parameter. 
If - you choose to migrate your DB instance from using standard storage to using Provisioned - IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. - The duration of the migration depends on several factors such as database load, storage - size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and - the number of prior scale storage operations. Typical migration times are under 24 hours, - but the process can take up to several days in some cases. During the migration, the DB - instance is available for use, but might experience performance degradation. While the - migration takes place, nightly backups for the instance are suspended. No other Amazon RDS - operations can take place for the instance, including modifying the instance, rebooting the - instance, deleting the instance, creating a read replica for the instance, and creating a - DB snapshot of the instance. Valid values: gp2 | gp3 | io1 | standard Default: io1 if the - Iops parameter is specified, otherwise gp2 +- `"StorageThroughput"`: The storage throughput value for the DB instance. This setting + applies only to the gp3 storage type. This setting doesn't apply to Amazon Aurora or RDS + Custom DB instances. +- `"StorageType"`: The storage type to associate with the DB instance. If you specify + Provisioned IOPS (io1), you must also include a value for the Iops parameter. If you choose + to migrate your DB instance from using standard storage to using Provisioned IOPS, or from + using Provisioned IOPS to using standard storage, the process can take time. The duration + of the migration depends on several factors such as database load, storage size, storage + type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of + prior scale storage operations. Typical migration times are under 24 hours, but the process + can take up to several days in some cases. During the migration, the DB instance is + available for use, but might experience performance degradation. While the migration takes + place, nightly backups for the instance are suspended. No other Amazon RDS operations can + take place for the instance, including modifying the instance, rebooting the instance, + deleting the instance, creating a read replica for the instance, and creating a DB snapshot + of the instance. Valid Values: gp2 | gp3 | io1 | standard Default: io1, if the Iops + parameter is specified. Otherwise, gp2. - `"TdeCredentialArn"`: The ARN from the key store with which to associate the instance for - TDE encryption. This setting doesn't apply to RDS Custom. + TDE encryption. This setting doesn't apply to RDS Custom DB instances. - `"TdeCredentialPassword"`: The password for the given ARN from the key store in order to - access the device. This setting doesn't apply to RDS Custom. -- `"UseDefaultProcessorFeatures"`: A value that indicates whether the DB instance class of - the DB instance uses its default processor features. This setting doesn't apply to RDS - Custom. -- `"VpcSecurityGroupIds"`: A list of Amazon EC2 VPC security groups to authorize on this DB - instance. This change is asynchronously applied as soon as possible. This setting doesn't - apply to RDS Custom. Amazon Aurora Not applicable. The associated list of EC2 VPC - security groups is managed by the DB cluster. For more information, see ModifyDBCluster. - Constraints: If supplied, must match existing VpcSecurityGroupIds. + access the device. 
This setting doesn't apply to RDS Custom DB instances. +- `"UseDefaultProcessorFeatures"`: Specifies whether the DB instance class of the DB + instance uses its default processor features. This setting doesn't apply to RDS Custom DB + instances. +- `"VpcSecurityGroupIds"`: A list of Amazon EC2 VPC security groups to associate with this + DB instance. This change is asynchronously applied as soon as possible. This setting + doesn't apply to the following DB instances: Amazon Aurora (The associated list of EC2 + VPC security groups is managed by the DB cluster. For more information, see + ModifyDBCluster.) RDS Custom Constraints: If supplied, must match existing VPC + security group IDs. """ function modify_dbinstance( DBInstanceIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -7183,9 +7246,11 @@ MySQL, PostgreSQL, and Oracle. This command doesn't apply to RDS Custom. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"EngineVersion"`: The engine version to upgrade the DB snapshot to. The following are the database engines and engine versions that are available when you upgrade a DB snapshot. - MySQL 5.5.46 (supported for 5.1 DB snapshots) Oracle 12.1.0.2.v8 (supported for - 12.1.0.1 DB snapshots) 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots) - 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots) PostgreSQL For the list of engine + MySQL 5.5.46 (supported for 5.1 DB snapshots) Oracle + 19.0.0.0.ru-2022-01.rur-2022-01.r1 (supported for 12.2.0.1 DB snapshots) + 19.0.0.0.ru-2022-07.rur-2022-07.r1 (supported for 12.1.0.2 DB snapshots) 12.1.0.2.v8 + (supported for 12.1.0.1 DB snapshots) 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots) + 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots) PostgreSQL For the list of engine versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS. - `"OptionGroupName"`: The option group to identify with the upgraded DB snapshot. You can @@ -7409,38 +7474,37 @@ end modify_global_cluster() modify_global_cluster(params::Dict{String,<:Any}) -Modify a setting for an Amazon Aurora global cluster. You can change one or more database +Modifies a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora -User Guide. This action only applies to Aurora DB clusters. +User Guide. This operation only applies to Aurora global database clusters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"AllowMajorVersionUpgrade"`: A value that indicates whether major version upgrades are - allowed. Constraints: You must allow major version upgrades when specifying a value for the - EngineVersion parameter that is a different major version than the DB cluster's current - version. If you upgrade the major version of a global database, the cluster and DB instance - parameter groups are set to the default parameter groups for the new version. Apply any - custom parameter groups after completing the upgrade. -- `"DeletionProtection"`: Indicates if the global database cluster has deletion protection - enabled. The global database cluster can't be deleted when deletion protection is enabled. +- `"AllowMajorVersionUpgrade"`: Specifies whether to allow major version upgrades. 
+ Constraints: Must be enabled if you specify a value for the EngineVersion parameter that's + a different major version than the global cluster's current version. If you upgrade the + major version of a global database, the cluster and DB instance parameter groups are set to + the default parameter groups for the new version. Apply any custom parameter groups after + completing the upgrade. +- `"DeletionProtection"`: Specifies whether to enable deletion protection for the global + database cluster. The global database cluster can't be deleted when deletion protection is + enabled. - `"EngineVersion"`: The version number of the database engine to which you want to - upgrade. Changing this parameter results in an outage. The change is applied during the - next maintenance window unless ApplyImmediately is enabled. To list all of the available - engine versions for aurora-mysql (for MySQL-based Aurora global databases), use the - following command: aws rds describe-db-engine-versions --engine aurora-mysql --query - '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' To list all of the available - engine versions for aurora-postgresql (for PostgreSQL-based Aurora global databases), use - the following command: aws rds describe-db-engine-versions --engine aurora-postgresql - --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' -- `"GlobalClusterIdentifier"`: The DB cluster identifier for the global cluster being - modified. This parameter isn't case-sensitive. Constraints: Must match the identifier of - an existing global database cluster. + upgrade. To list all of the available engine versions for aurora-mysql (for MySQL-based + Aurora global databases), use the following command: aws rds describe-db-engine-versions + --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' + To list all of the available engine versions for aurora-postgresql (for PostgreSQL-based + Aurora global databases), use the following command: aws rds describe-db-engine-versions + --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == + `true`].[EngineVersion]' +- `"GlobalClusterIdentifier"`: The cluster identifier for the global cluster to modify. + This parameter isn't case-sensitive. Constraints: Must match the identifier of an + existing global database cluster. - `"NewGlobalClusterIdentifier"`: The new cluster identifier for the global database - cluster when modifying a global database cluster. This value is stored as a lowercase - string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens The first - character must be a letter Can't end with a hyphen or contain two consecutive hyphens - Example: my-cluster2 + cluster. This value is stored as a lowercase string. Constraints: Must contain from 1 to + 63 letters, numbers, or hyphens. The first character must be a letter. Can't end with a + hyphen or contain two consecutive hyphens. Example: my-cluster2 """ function modify_global_cluster(; aws_config::AbstractAWSConfig=global_aws_config()) return rds( @@ -8828,8 +8892,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. For more information, see Kerberos Authentication in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. This setting doesn't apply to RDS Custom. 
+- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. Constraints: Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. This setting doesn't apply to RDS Custom DB instances. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain - `"EnableCloudwatchLogsExports"`: The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS @@ -9251,8 +9328,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Microsoft SQL Server, Oracle, and PostgreSQL DB instances in an Active Directory Domain. This setting doesn't apply to RDS Custom. For more information, see Kerberos Authentication in the Amazon RDS User Guide. -- `"DomainIAMRoleName"`: Specify the name of the IAM role to be used when making API calls - to the Directory Service. This setting doesn't apply to RDS Custom. +- `"DomainAuthSecretArn"`: The ARN for the Secrets Manager secret with the credentials for + the user joining the domain. Constraints: Can't be longer than 64 characters. Example: + arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456 +- `"DomainDnsIps"`: The IPv4 DNS IP addresses of your primary and secondary Active + Directory domain controllers. Constraints: Two IP addresses must be provided. If there + isn't a secondary domain controller, use the IP address of the primary domain controller + for both entries in the list. Example: 123.124.125.126,234.235.236.237 +- `"DomainFqdn"`: The fully qualified domain name (FQDN) of an Active Directory domain. + Constraints: Can't be longer than 64 characters. Example: + mymanagedADtest.mymanagedAD.mydomain +- `"DomainIAMRoleName"`: The name of the IAM role to use when making API calls to the + Directory Service. This setting doesn't apply to RDS Custom DB instances. +- `"DomainOu"`: The Active Directory organizational unit for your DB instance to join. + Constraints: Must be in the distinguished name format. Can't be longer than 64 + characters. Example: OU=mymanagedADtestOU,DC=mymanagedADtest,DC=mymanagedAD,DC=mydomain - `"EnableCloudwatchLogsExports"`: The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS @@ -9930,18 +10020,18 @@ end Switches over a blue/green deployment. 
Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment. For more information, see Using Amazon -RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using +RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide. # Arguments -- `blue_green_deployment_identifier`: The blue/green deployment identifier. Constraints: - Must match an existing blue/green deployment identifier. +- `blue_green_deployment_identifier`: The unique identifier of the blue/green deployment. + Constraints: Must match an existing blue/green deployment identifier. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"SwitchoverTimeout"`: The amount of time, in seconds, for the switchover to complete. - The default is 300. If the switchover takes longer than the specified duration, then any - changes are rolled back, and no changes are made to the environments. + Default: 300 If the switchover takes longer than the specified duration, then any changes + are rolled back, and no changes are made to the environments. """ function switchover_blue_green_deployment( BlueGreenDeploymentIdentifier; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/redshift.jl b/src/services/redshift.jl index c873961ab7..fd8e607d2c 100644 --- a/src/services/redshift.jl +++ b/src/services/redshift.jl @@ -328,8 +328,8 @@ Amazon Redshift Cluster Management Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"SnapshotArn"`: The Amazon Resource Name (ARN) of the snapshot to authorize access to. - `"SnapshotClusterIdentifier"`: The identifier of the cluster the snapshot was created - from. This parameter is required if your IAM user or role has a policy containing a - snapshot resource element that specifies anything other than * for the cluster name. + from. This parameter is required if your IAM user has a policy containing a snapshot + resource element that specifies anything other than * for the cluster name. - `"SnapshotIdentifier"`: The identifier of the snapshot the account is authorized to restore. """ @@ -513,9 +513,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys If the value is -1, the manual snapshot is retained indefinitely. The value must be either -1 or an integer between 1 and 3,653. The default value is -1. - `"SourceSnapshotClusterIdentifier"`: The identifier of the cluster the source snapshot - was created from. This parameter is required if your IAM user or role has a policy - containing a snapshot resource element that specifies anything other than * for the cluster - name. Constraints: Must be the identifier for a valid cluster. + was created from. This parameter is required if your IAM user has a policy containing a + snapshot resource element that specifies anything other than * for the cluster name. + Constraints: Must be the identifier for a valid cluster. """ function copy_cluster_snapshot( SourceSnapshotIdentifier, @@ -623,16 +623,16 @@ Redshift Cluster Management Guide. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Must be unique for all clusters within an Amazon Web Services account. 
Example: myexamplecluster -- `master_user_password`: The password associated with the admin user for the cluster that - is being created. Constraints: Must be between 8 and 64 characters in length. Must - contain at least one uppercase letter. Must contain at least one lowercase letter. Must - contain one number. Can be any printable ASCII character (ASCII code 33-126) except ' - (single quote), \" (double quote), , /, or @. -- `master_username`: The user name associated with the admin user for the cluster that is - being created. Constraints: Must be 1 - 128 alphanumeric characters or hyphens. The user - name can't be PUBLIC. Must contain only lowercase letters, numbers, underscore, plus - sign, period (dot), at symbol (@), or hyphen. The first character must be a letter. - Must not contain a colon (:) or a slash (/). Cannot be a reserved word. A list of +- `master_user_password`: The password associated with the admin user account for the + cluster that is being created. Constraints: Must be between 8 and 64 characters in + length. Must contain at least one uppercase letter. Must contain at least one lowercase + letter. Must contain one number. Can be any printable ASCII character (ASCII code + 33-126) except ' (single quote), \" (double quote), , /, or @. +- `master_username`: The user name associated with the admin user account for the cluster + that is being created. Constraints: Must be 1 - 128 alphanumeric characters or hyphens. + The user name can't be PUBLIC. Must contain only lowercase letters, numbers, underscore, + plus sign, period (dot), at symbol (@), or hyphen. The first character must be a letter. + Must not contain a colon (:) or a slash (/). Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide. - `node_type`: The node type to be provisioned for the cluster. For information about node @@ -1044,6 +1044,63 @@ function create_cluster_subnet_group( ) end +""" + create_custom_domain_association(cluster_identifier, custom_domain_certificate_arn, custom_domain_name) + create_custom_domain_association(cluster_identifier, custom_domain_certificate_arn, custom_domain_name, params::Dict{String,<:Any}) + +Used to create a custom domain name for a cluster. Properties include the custom domain +name, the cluster the custom domain is associated with, and the certificate Amazon Resource +Name (ARN). + +# Arguments +- `cluster_identifier`: The cluster identifier that the custom domain is associated with. +- `custom_domain_certificate_arn`: The certificate Amazon Resource Name (ARN) for the + custom domain name association. +- `custom_domain_name`: The custom domain name for a custom domain association. 
+ +""" +function create_custom_domain_association( + ClusterIdentifier, + CustomDomainCertificateArn, + CustomDomainName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "CreateCustomDomainAssociation", + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, + "CustomDomainCertificateArn" => CustomDomainCertificateArn, + "CustomDomainName" => CustomDomainName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_custom_domain_association( + ClusterIdentifier, + CustomDomainCertificateArn, + CustomDomainName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "CreateCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClusterIdentifier" => ClusterIdentifier, + "CustomDomainCertificateArn" => CustomDomainCertificateArn, + "CustomDomainName" => CustomDomainName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_endpoint_access(endpoint_name, subnet_group_name) create_endpoint_access(endpoint_name, subnet_group_name, params::Dict{String,<:Any}) @@ -1862,7 +1919,7 @@ authorizations before you can delete the snapshot. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"SnapshotClusterIdentifier"`: The unique identifier of the cluster the snapshot was - created from. This parameter is required if your IAM user or role has a policy containing a + created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name. Constraints: Must be the name of valid cluster. """ @@ -1932,6 +1989,44 @@ function delete_cluster_subnet_group( ) end +""" + delete_custom_domain_association(cluster_identifier) + delete_custom_domain_association(cluster_identifier, params::Dict{String,<:Any}) + +Contains information about deleting a custom domain association for a cluster. + +# Arguments +- `cluster_identifier`: The identifier of the cluster to delete a custom domain association + for. + +""" +function delete_custom_domain_association( + ClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DeleteCustomDomainAssociation", + Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_custom_domain_association( + ClusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "DeleteCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_endpoint_access(endpoint_name) delete_endpoint_access(endpoint_name, params::Dict{String,<:Any}) @@ -2669,7 +2764,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum - 20, maximum 500. + 20, maximum 100. - `"OwnerAccount"`: The Amazon Web Services account used to create or copy the snapshot. 
Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your Amazon Web Services account, or do not @@ -2897,6 +2992,40 @@ function describe_clusters( ) end +""" + describe_custom_domain_associations() + describe_custom_domain_associations(params::Dict{String,<:Any}) + +Contains information for custom domain associations for a cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CustomDomainCertificateArn"`: The certificate Amazon Resource Name (ARN) for the custom + domain association. +- `"CustomDomainName"`: The custom domain name for the custom domain association. +- `"Marker"`: The marker for the custom domain association. +- `"MaxRecords"`: The maximum records setting for the associated custom domain. +""" +function describe_custom_domain_associations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DescribeCustomDomainAssociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_custom_domain_associations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "DescribeCustomDomainAssociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_data_shares() describe_data_shares(params::Dict{String,<:Any}) @@ -4323,8 +4452,8 @@ function enable_snapshot_copy( end """ - get_cluster_credentials(cluster_identifier, db_user) - get_cluster_credentials(cluster_identifier, db_user, params::Dict{String,<:Any}) + get_cluster_credentials(db_user) + get_cluster_credentials(db_user, params::Dict{String,<:Any}) Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with @@ -4344,8 +4473,6 @@ specified, the IAM policy must allow access to the resource dbname for the speci database name. # Arguments -- `cluster_identifier`: The unique identifier of the cluster that contains the database for - which you are requesting credentials. This parameter is case sensitive. - `db_user`: The name of a database user. If a user name matching DbUser exists in the database, the temporary user credentials have the same permissions as the existing user. If DbUser doesn't exist in the database and Autocreate is True, a new user is created using @@ -4363,6 +4490,9 @@ database name. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoCreate"`: Create a database user with the name specified for the user named in DbUser if one does not exist. +- `"ClusterIdentifier"`: The unique identifier of the cluster that contains the database + for which you are requesting credentials. This parameter is case sensitive. +- `"CustomDomainName"`: The custom domain name for the cluster credentials. - `"DbGroups"`: A list of the names of existing database groups that the user named in DbUser will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC. Database group name @@ -4381,41 +4511,28 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DurationSeconds"`: The number of seconds until the returned temporary password expires. Constraint: minimum 900, maximum 3600. 
Default: 900 """ -function get_cluster_credentials( - ClusterIdentifier, DbUser; aws_config::AbstractAWSConfig=global_aws_config() -) +function get_cluster_credentials(DbUser; aws_config::AbstractAWSConfig=global_aws_config()) return redshift( "GetClusterCredentials", - Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier, "DbUser" => DbUser); + Dict{String,Any}("DbUser" => DbUser); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_cluster_credentials( - ClusterIdentifier, - DbUser, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + DbUser, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return redshift( "GetClusterCredentials", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ClusterIdentifier" => ClusterIdentifier, "DbUser" => DbUser - ), - params, - ), - ); + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("DbUser" => DbUser), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end """ - get_cluster_credentials_with_iam(cluster_identifier) - get_cluster_credentials_with_iam(cluster_identifier, params::Dict{String,<:Any}) + get_cluster_credentials_with_iam() + get_cluster_credentials_with_iam(params::Dict{String,<:Any}) Returns a database user name and temporary password with temporary authorization to log in to an Amazon Redshift database. The database user is mapped 1:1 to the source Identity and @@ -4426,12 +4543,11 @@ operation must have an IAM policy attached that allows access to all necessary a resources. For more information about permissions, see Using identity-based policies (IAM policies) in the Amazon Redshift Cluster Management Guide. -# Arguments -- `cluster_identifier`: The unique identifier of the cluster that contains the database for - which you are requesting credentials. - # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClusterIdentifier"`: The unique identifier of the cluster that contains the database + for which you are requesting credentials. +- `"CustomDomainName"`: The custom domain name for the IAM message cluster credentials. - `"DbName"`: The name of the database for which you are requesting credentials. If the database name is specified, the IAM policy must allow access to the resource dbname for the specified database name. If the database name is not specified, access to all databases is @@ -4439,28 +4555,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DurationSeconds"`: The number of seconds until the returned temporary password expires. Range: 900-3600. Default: 900. 
""" -function get_cluster_credentials_with_iam( - ClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +function get_cluster_credentials_with_iam(; + aws_config::AbstractAWSConfig=global_aws_config() ) return redshift( - "GetClusterCredentialsWithIAM", - Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier); + "GetClusterCredentialsWithIAM"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function get_cluster_credentials_with_iam( - ClusterIdentifier, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return redshift( "GetClusterCredentialsWithIAM", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier), params - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -4741,11 +4850,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Operations never return the - password, so this operation provides a way to regain access to the admin user for a cluster - if the password is lost. Default: Uses existing setting. Constraints: Must be between 8 - and 64 characters in length. Must contain at least one uppercase letter. Must contain - at least one lowercase letter. Must contain one number. Can be any printable ASCII - character (ASCII code 33-126) except ' (single quote), \" (double quote), , /, or @. + password, so this operation provides a way to regain access to the admin user account for a + cluster if the password is lost. Default: Uses existing setting. Constraints: Must be + between 8 and 64 characters in length. Must contain at least one uppercase letter. Must + contain at least one lowercase letter. Must contain one number. Can be any printable + ASCII character (ASCII code 33-126) except ' (single quote), \" (double quote), , /, or @. - `"NewClusterIdentifier"`: The new identifier for the cluster. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens. Alphabetic characters must be lowercase. First character must be a letter. Cannot end with a hyphen or contain two consecutive @@ -5152,6 +5261,49 @@ function modify_cluster_subnet_group( ) end +""" + modify_custom_domain_association(cluster_identifier) + modify_custom_domain_association(cluster_identifier, params::Dict{String,<:Any}) + +Contains information for changing a custom domain association. + +# Arguments +- `cluster_identifier`: The identifier of the cluster to change a custom domain association + for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CustomDomainCertificateArn"`: The certificate Amazon Resource Name (ARN) for the + changed custom domain association. +- `"CustomDomainName"`: The custom domain name for a changed custom domain association. 
+""" +function modify_custom_domain_association( + ClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return redshift( + "ModifyCustomDomainAssociation", + Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_custom_domain_association( + ClusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return redshift( + "ModifyCustomDomainAssociation", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ClusterIdentifier" => ClusterIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_endpoint_access(endpoint_name) modify_endpoint_access(endpoint_name, params::Dict{String,<:Any}) @@ -5845,8 +5997,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys message to restore from a cluster. You must specify this parameter or snapshotIdentifier, but not both. - `"SnapshotClusterIdentifier"`: The name of the cluster the source snapshot was created - from. This parameter is required if your IAM user or role has a policy containing a - snapshot resource element that specifies anything other than * for the cluster name. + from. This parameter is required if your IAM user has a policy containing a snapshot + resource element that specifies anything other than * for the cluster name. - `"SnapshotIdentifier"`: The name of the snapshot from which to create the new cluster. This parameter isn't case sensitive. You must specify this parameter or snapshotArn, but not both. Example: my-snapshot-id @@ -6111,8 +6263,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SnapshotArn"`: The Amazon Resource Name (ARN) of the snapshot associated with the message to revoke access. - `"SnapshotClusterIdentifier"`: The identifier of the cluster the snapshot was created - from. This parameter is required if your IAM user or role has a policy containing a - snapshot resource element that specifies anything other than * for the cluster name. + from. This parameter is required if your IAM user has a policy containing a snapshot + resource element that specifies anything other than * for the cluster name. - `"SnapshotIdentifier"`: The identifier of the snapshot that the account can no longer access. """ diff --git a/src/services/route53resolver.jl b/src/services/route53resolver.jl index 328480383a..e0a080800e 100644 --- a/src/services/route53resolver.jl +++ b/src/services/route53resolver.jl @@ -436,6 +436,75 @@ function create_firewall_rule_group( ) end +""" + create_outpost_resolver(creator_request_id, name, outpost_arn, preferred_instance_type) + create_outpost_resolver(creator_request_id, name, outpost_arn, preferred_instance_type, params::Dict{String,<:Any}) + +Creates an Route 53 Resolver on an Outpost. + +# Arguments +- `creator_request_id`: A unique string that identifies the request and that allows failed + requests to be retried without the risk of running the operation twice. CreatorRequestId + can be any unique string, for example, a date/time stamp. +- `name`: A friendly name that lets you easily find a configuration in the Resolver + dashboard in the Route 53 console. +- `outpost_arn`: The Amazon Resource Name (ARN) of the Outpost. If you specify this, you + must also specify a value for the PreferredInstanceType. +- `preferred_instance_type`: The Amazon EC2 instance type. 
If you specify this, you must + also specify a value for the OutpostArn. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceCount"`: Number of Amazon EC2 instances for the Resolver on Outpost. The + default and minimal value is 4. +- `"Tags"`: A string that helps identify the Route 53 Resolvers on Outpost. +""" +function create_outpost_resolver( + CreatorRequestId, + Name, + OutpostArn, + PreferredInstanceType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53resolver( + "CreateOutpostResolver", + Dict{String,Any}( + "CreatorRequestId" => CreatorRequestId, + "Name" => Name, + "OutpostArn" => OutpostArn, + "PreferredInstanceType" => PreferredInstanceType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_outpost_resolver( + CreatorRequestId, + Name, + OutpostArn, + PreferredInstanceType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route53resolver( + "CreateOutpostResolver", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "CreatorRequestId" => CreatorRequestId, + "Name" => Name, + "OutpostArn" => OutpostArn, + "PreferredInstanceType" => PreferredInstanceType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_resolver_endpoint(creator_request_id, direction, ip_addresses, security_group_ids) create_resolver_endpoint(creator_request_id, direction, ip_addresses, security_group_ids, params::Dict{String,<:Any}) @@ -466,7 +535,11 @@ service for a VPC to your network. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Name"`: A friendly name that lets you easily find a configuration in the Resolver dashboard in the Route 53 console. -- `"ResolverEndpointType"`: For the endpoint type you can choose either IPv4, IPv6. or +- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. If you specify this, you + must also specify a value for the PreferredInstanceType. +- `"PreferredInstanceType"`: The instance type. If you specify this, you must also specify + a value for the OutpostArn. +- `"ResolverEndpointType"`: For the endpoint type you can choose either IPv4, IPv6, or dual-stack. A dual-stack endpoint means that it will resolve via both IPv4 and IPv6. This endpoint type is applied to all IP addresses. - `"Tags"`: A list of the tag keys and values that you want to associate with the endpoint. @@ -625,8 +698,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys route DNS queries to the IP addresses that you specify in TargetIps. - `"Tags"`: A list of the tag keys and values that you want to associate with the endpoint. - `"TargetIps"`: The IPs that you want Resolver to forward DNS queries to. You can specify - only IPv4 addresses. Separate IP addresses with a space. TargetIps is available only when - the value of Rule type is FORWARD. + either Ipv4 or Ipv6 addresses but not both in the same rule. Separate IP addresses with a + space. TargetIps is available only when the value of Rule type is FORWARD. """ function create_resolver_rule( CreatorRequestId, @@ -799,6 +872,35 @@ function delete_firewall_rule_group( ) end +""" + delete_outpost_resolver(id) + delete_outpost_resolver(id, params::Dict{String,<:Any}) + +Deletes a Resolver on the Outpost. + +# Arguments +- `id`: A unique string that identifies the Resolver on the Outpost. 
+ +""" +function delete_outpost_resolver(Id; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "DeleteOutpostResolver", + Dict{String,Any}("Id" => Id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_outpost_resolver( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "DeleteOutpostResolver", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Id" => Id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_resolver_endpoint(resolver_endpoint_id) delete_resolver_endpoint(resolver_endpoint_id, params::Dict{String,<:Any}) @@ -1306,6 +1408,36 @@ function get_firewall_rule_group_policy( ) end +""" + get_outpost_resolver(id) + get_outpost_resolver(id, params::Dict{String,<:Any}) + +Gets information about a specified Resolver on the Outpost, such as its instance count and +type, name, and the current status of the Resolver. + +# Arguments +- `id`: The ID of the Resolver on the Outpost. + +""" +function get_outpost_resolver(Id; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "GetOutpostResolver", + Dict{String,Any}("Id" => Id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_outpost_resolver( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "GetOutpostResolver", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Id" => Id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_resolver_config(resource_id) get_resolver_config(resource_id, params::Dict{String,<:Any}) @@ -1984,6 +2116,37 @@ function list_firewall_rules( ) end +""" + list_outpost_resolvers() + list_outpost_resolvers(params::Dict{String,<:Any}) + +Lists all the Resolvers on Outposts that were created using the current Amazon Web Services +account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of Resolvers on the Outpost that you want to return in + the response to a ListOutpostResolver request. If you don't specify a value for MaxResults, + the request returns up to 100 Resolvers. +- `"NextToken"`: For the first ListOutpostResolver request, omit this value. +- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. +""" +function list_outpost_resolvers(; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "ListOutpostResolvers"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_outpost_resolvers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "ListOutpostResolvers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_resolver_configs() list_resolver_configs(params::Dict{String,<:Any}) @@ -2451,7 +2614,6 @@ operations that you want the account to be able to perform on the configuration. can specify the following operations in the Actions section of the statement: route53resolver:AssociateResolverQueryLogConfig route53resolver:DisassociateResolverQueryLogConfig - route53resolver:ListResolverQueryLogConfigAssociations route53resolver:ListResolverQueryLogConfigs In the Resource section of the statement, you specify the ARNs for the query logging configurations that you want to share with the account that you specified in Arn. 
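As a rough usage sketch of the Resolver-on-Outposts operations added in this file (assuming the usual AWS.jl `@service` calling pattern and a parsed Dict response; the Outpost ARN, instance type, and response keys are illustrative placeholders, not values taken from this change):

using AWS: @service
using UUIDs
@service Route53Resolver

# Placeholder Outpost ARN; OutpostArn and PreferredInstanceType must be given together.
outpost_arn = "arn:aws:outposts:us-east-1:111122223333:outpost/op-0exampleoutpost"

resp = Route53Resolver.create_outpost_resolver(
    string(uuid4()),             # CreatorRequestId: any unique string, e.g. a UUID or timestamp
    "my-outpost-resolver",       # Name shown in the Resolver dashboard
    outpost_arn,
    "m5.large",                  # PreferredInstanceType (assumed example type)
    Dict("InstanceCount" => 4),  # optional; 4 is the documented default and minimum
)

# The "OutpostResolver"/"Id" keys are assumed from the service's documented response shape.
resolver_id = resp["OutpostResolver"]["Id"]
Route53Resolver.get_outpost_resolver(resolver_id)     # status, instance count and type
Route53Resolver.list_outpost_resolvers()              # all Resolvers on Outposts in the account
Route53Resolver.delete_outpost_resolver(resolver_id)  # tear down when no longer needed

The update_outpost_resolver operation added in the next hunk follows the same Id-plus-params pattern, e.g. Route53Resolver.update_outpost_resolver(resolver_id, Dict("InstanceCount" => 6)).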
@@ -2874,6 +3036,41 @@ function update_firewall_rule_group_association( ) end +""" + update_outpost_resolver(id) + update_outpost_resolver(id, params::Dict{String,<:Any}) + +You can use UpdateOutpostResolver to update the instance count, type, or name of a Resolver +on an Outpost. + +# Arguments +- `id`: A unique string that identifies Resolver on an Outpost. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"InstanceCount"`: The Amazon EC2 instance count for a Resolver on the Outpost. +- `"Name"`: Name of the Resolver on the Outpost. +- `"PreferredInstanceType"`: Amazon EC2 instance type. +""" +function update_outpost_resolver(Id; aws_config::AbstractAWSConfig=global_aws_config()) + return route53resolver( + "UpdateOutpostResolver", + Dict{String,Any}("Id" => Id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_outpost_resolver( + Id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return route53resolver( + "UpdateOutpostResolver", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Id" => Id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_resolver_config(autodefined_reverse_flag, resource_id) update_resolver_config(autodefined_reverse_flag, resource_id, params::Dict{String,<:Any}) @@ -2987,8 +3184,10 @@ only update between IPV4 and DUALSTACK, IPV6 endpoint type can't be updated to o Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Name"`: The name of the Resolver endpoint that you want to update. - `"ResolverEndpointType"`: Specifies the endpoint type for what type of IP address the - endpoint uses to forward DNS queries. -- `"UpdateIpAddresses"`: Updates the Resolver endpoint type to IpV4, Ipv6, or dual-stack. + endpoint uses to forward DNS queries. Updating to IPV6 type isn't currently supported. +- `"UpdateIpAddresses"`: Specifies the IPv6 address when you update the Resolver endpoint + from IPv4 to dual-stack. If you don't specify an IPv6 address, one will be automatically + chosen from your subnet. """ function update_resolver_endpoint( ResolverEndpointId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/route_53.jl b/src/services/route_53.jl index e082273c5e..68a0f14eea 100644 --- a/src/services/route_53.jl +++ b/src/services/route_53.jl @@ -190,12 +190,12 @@ set, see \"Examples.\" Don't refer to the syntax in the \"Parameter Syntax\" sec includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets. Change Propagation to Route 53 DNS Servers When you submit a ChangeResourceRecordSets request, Route 53 propagates your -changes to all of the Route 53 authoritative DNS servers. While your changes are -propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange -returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within -60 seconds. For more information, see GetChange. Limits on ChangeResourceRecordSets -Requests For information about the limits on a ChangeResourceRecordSets request, see -Limits in the Amazon Route 53 Developer Guide. +changes to all of the Route 53 authoritative DNS servers managing the hosted zone. While +your changes are propagating, GetChange returns a status of PENDING. When propagation is +complete, GetChange returns a status of INSYNC. 
Changes generally propagate to all Route 53 +name servers managing the hosted zone within 60 seconds. For more information, see +GetChange. Limits on ChangeResourceRecordSets Requests For information about the limits +on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide. # Arguments - `change_batch`: A complex type that contains an optional comment and the Changes element. @@ -447,6 +447,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DelegationSetId"`: If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet. + If you are using a reusable delegation set to create a public hosted zone for a subdomain, + make sure that the parent hosted zone doesn't use one or more of the same name servers. If + you have overlapping nameservers, the operation will cause a ConflictingDomainsExist error. - `"HostedZoneConfig"`: (Optional) A complex type that contains the following optional values: For public and private hosted zones, an optional comment For private hosted zones, an optional PrivateZone element If you don't specify a comment or the PrivateZone @@ -808,6 +811,11 @@ specified traffic policy version. In addition, CreateTrafficPolicyInstance assoc resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created. +After you submit an CreateTrafficPolicyInstance request, there's a brief delay while Amazon +Route 53 creates the resource record sets that are specified in the traffic policy +definition. Use GetTrafficPolicyInstance with the id of new traffic policy instance to +confirm that the CreateTrafficPolicyInstance request completed successfully. For more +information, see the State response element. # Arguments - `hosted_zone_id`: The ID of the hosted zone that you want Amazon Route 53 to create @@ -1534,8 +1542,9 @@ end Returns the current status of a change batch request. The status is one of the following values: PENDING indicates that the changes in this request have not propagated to all -Amazon Route 53 DNS servers. This is the initial status of all change batch requests. -INSYNC indicates that the changes have propagated to all Route 53 DNS servers. +Amazon Route 53 DNS servers managing the hosted zone. This is the initial status of all +change batch requests. INSYNC indicates that the changes have propagated to all Route 53 +DNS servers managing the hosted zone. # Arguments - `id`: The ID of the change batch request. The value that you specify here is the value @@ -2054,11 +2063,11 @@ end get_traffic_policy_instance(id) get_traffic_policy_instance(id, params::Dict{String,<:Any}) -Gets information about a specified traffic policy instance. After you submit a -CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief -delay while Amazon Route 53 creates the resource record sets that are specified in the -traffic policy definition. For more information, see the State response element. In the -Route 53 console, traffic policy instances are known as policy records. +Gets information about a specified traffic policy instance. 
Use GetTrafficPolicyInstance +with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance +or an UpdateTrafficPolicyInstance request completed successfully. For more information, see +the State response element. In the Route 53 console, traffic policy instances are known +as policy records. # Arguments - `id`: The ID of the traffic policy instance that you want to get information about. @@ -3098,7 +3107,9 @@ end Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask. This call only supports querying public -hosted zones. +hosted zones. The TestDnsAnswer returns information similar to what you would expect from +the answer section of the dig command. Therefore, if you query for the name servers of a +subdomain that point to the parent name servers, those will not be returned. # Arguments - `hostedzoneid`: The ID of the hosted zone that you want Amazon Route 53 to simulate a @@ -3421,17 +3432,22 @@ end update_traffic_policy_instance(id, ttl, traffic_policy_id, traffic_policy_version) update_traffic_policy_instance(id, ttl, traffic_policy_id, traffic_policy_version, params::Dict{String,<:Any}) -Updates the resource record sets in a specified hosted zone that were created based on the -settings in a specified traffic policy version. When you update a traffic policy instance, -Amazon Route 53 continues to respond to DNS queries for the root resource record set name -(such as example.com) while it replaces one group of resource record sets with another. -Route 53 performs the following operations: Route 53 creates a new group of resource -record sets based on the specified traffic policy. This is true regardless of how -significant the differences are between the existing resource record sets and the new -resource record sets. When all of the new resource record sets have been created, Route -53 starts to respond to DNS queries for the root resource record set name (such as -example.com) by using the new resource record sets. Route 53 deletes the old group of -resource record sets that are associated with the root resource record set name. + After you submit a UpdateTrafficPolicyInstance request, there's a brief delay while +Route 53 creates the resource record sets that are specified in the traffic policy +definition. Use GetTrafficPolicyInstance with the id of updated traffic policy instance +confirm that the UpdateTrafficPolicyInstance request completed successfully. For more +information, see the State response element. Updates the resource record sets in a +specified hosted zone that were created based on the settings in a specified traffic policy +version. When you update a traffic policy instance, Amazon Route 53 continues to respond to +DNS queries for the root resource record set name (such as example.com) while it replaces +one group of resource record sets with another. Route 53 performs the following operations: + Route 53 creates a new group of resource record sets based on the specified traffic +policy. This is true regardless of how significant the differences are between the existing +resource record sets and the new resource record sets. When all of the new resource +record sets have been created, Route 53 starts to respond to DNS queries for the root +resource record set name (such as example.com) by using the new resource record sets. 
+Route 53 deletes the old group of resource record sets that are associated with the root +resource record set name. # Arguments - `id`: The ID of the traffic policy instance that you want to update. diff --git a/src/services/route_53_domains.jl b/src/services/route_53_domains.jl index 722d025da1..7db27313ed 100644 --- a/src/services/route_53_domains.jl +++ b/src/services/route_53_domains.jl @@ -878,24 +878,22 @@ end register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact) register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact, params::Dict{String,<:Any}) -This operation registers a domain. Domains are registered either by Amazon Registrar (for -.com, .net, and .org domains) or by our registrar associate, Gandi (for all other domains). -For some top-level domains (TLDs), this operation requires extra parameters. When you -register a domain, Amazon Route 53 does the following: Creates a Route 53 hosted zone -that has the same name as the domain. Route 53 assigns four name servers to your hosted -zone and automatically updates your domain registration with the names of these name -servers. Enables auto renew, so your domain registration will renew automatically each -year. We'll notify you in advance of the renewal date so you can choose whether to renew -the registration. Optionally enables privacy protection, so WHOIS queries return contact -information either for Amazon Registrar (for .com, .net, and .org domains) or for our -registrar associate, Gandi (for all other TLDs). If you don't enable privacy protection, -WHOIS queries return the information that you entered for the administrative, registrant, -and technical contacts. You must specify the same privacy setting for the administrative, -registrant, and technical contacts. If registration is successful, returns an operation -ID that you can use to track the progress and completion of the action. If the request is -not completed successfully, the domain registrant is notified by email. Charges your -Amazon Web Services account an amount based on the top-level domain. For more information, -see Amazon Route 53 Pricing. +This operation registers a domain. For some top-level domains (TLDs), this operation +requires extra parameters. When you register a domain, Amazon Route 53 does the following: + Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four +name servers to your hosted zone and automatically updates your domain registration with +the names of these name servers. Enables auto renew, so your domain registration will +renew automatically each year. We'll notify you in advance of the renewal date so you can +choose whether to renew the registration. Optionally enables privacy protection, so WHOIS +queries return contact for the registrar or the phrase \"REDACTED FOR PRIVACY\", or \"On +behalf of <domain name> owner.\" If you don't enable privacy protection, WHOIS +queries return the information that you entered for the administrative, registrant, and +technical contacts. While some domains may allow different privacy settings per contact, +we recommend specifying the same privacy setting for all contacts. If registration is +successful, returns an operation ID that you can use to track the progress and completion +of the action. If the request is not completed successfully, the domain registrant is +notified by email. Charges your Amazon Web Services account an amount based on the +top-level domain. 
For more information, see Amazon Route 53 Pricing. # Arguments - `admin_contact`: Provides detailed contact information. For information about the values @@ -1199,17 +1197,15 @@ end transfer_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact) transfer_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact, params::Dict{String,<:Any}) -Transfers a domain from another registrar to Amazon Route 53. When the transfer is -complete, the domain is registered either with Amazon Registrar (for .com, .net, and .org -domains) or with our registrar associate, Gandi (for all other TLDs). For more information -about transferring domains, see the following topics: For transfer requirements, a -detailed procedure, and information about viewing the status of a domain that you're -transferring to Route 53, see Transferring Registration for a Domain to Amazon Route 53 in -the Amazon Route 53 Developer Guide. For information about how to transfer a domain from -one Amazon Web Services account to another, see TransferDomainToAnotherAwsAccount. For -information about how to transfer a domain to another domain registrar, see Transferring a -Domain from Amazon Route 53 to Another Registrar in the Amazon Route 53 Developer Guide. -If the registrar for your domain is also the DNS service provider for the domain, we highly +Transfers a domain from another registrar to Amazon Route 53. For more information about +transferring domains, see the following topics: For transfer requirements, a detailed +procedure, and information about viewing the status of a domain that you're transferring to +Route 53, see Transferring Registration for a Domain to Amazon Route 53 in the Amazon Route +53 Developer Guide. For information about how to transfer a domain from one Amazon Web +Services account to another, see TransferDomainToAnotherAwsAccount. For information +about how to transfer a domain to another domain registrar, see Transferring a Domain from +Amazon Route 53 to Another Registrar in the Amazon Route 53 Developer Guide. If the +registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar @@ -1246,10 +1242,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Nameservers"`: Contains details for the host and glue IP addresses. - `"PrivacyProtectAdminContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information - either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar - associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the - information that you entered for the admin contact. You must specify the same privacy - setting for the administrative, registrant, and technical contacts. Default: true + for the registrar, the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain + name> owner.\". While some domains may allow different privacy settings per contact, we + recommend specifying the same privacy setting for all contacts. Default: true - `"PrivacyProtectRegistrantContact"`: Whether you want to conceal contact information from WHOIS queries. 
If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar @@ -1387,7 +1382,8 @@ domain registrant will be notified by email. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AdminContact"`: Provides detailed contact information. -- `"Consent"`: Customer's consent for the owner change request. +- `"Consent"`: Customer's consent for the owner change request. Required if the domain is + not free (consent price is more than 0.00). - `"RegistrantContact"`: Provides detailed contact information. - `"TechContact"`: Provides detailed contact information. """ @@ -1421,11 +1417,11 @@ end update_domain_contact_privacy(domain_name, params::Dict{String,<:Any}) This operation updates the specified domain contact's privacy setting. When privacy -protection is enabled, contact information such as email address is replaced either with -contact information for Amazon Registrar (for .com, .net, and .org domains) or with contact -information for our registrar associate, Gandi. You must specify the same privacy setting -for the administrative, registrant, and technical contacts. This operation affects only -the contact information for the specified contact type (administrative, registrant, or +protection is enabled, your contact information is replaced with contact information for +the registrar or with the phrase \"REDACTED FOR PRIVACY\", or \"On behalf of <domain +name> owner.\" While some domains may allow different privacy settings per contact, we +recommend specifying the same privacy setting for all contacts. This operation affects +only the contact information for the specified contact type (administrative, registrant, or technical). If the request succeeds, Amazon Route 53 returns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email. By diff --git a/src/services/s3.jl b/src/services/s3.jl index 7336ea8c8d..3b2ddcb3a6 100644 --- a/src/services/s3.jl +++ b/src/services/s3.jl @@ -32,7 +32,7 @@ ListMultipartUploads the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Key of the object for which the multipart upload was initiated. - `upload_id`: Upload ID that identifies the multipart upload. @@ -127,7 +127,7 @@ ListMultipartUploads the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `upload_id`: ID for the initiated multipart upload. @@ -222,85 +222,88 @@ exceptions, they return the error). If the copy is successful, you receive a res information about the copied object. 
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body. The copy request charge is based on the storage class and -Region that you specify for the destination object. For pricing information, see Amazon S3 -pricing. Amazon S3 transfer acceleration does not support cross-Region copies. If you -request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad -Request error. For more information, see Transfer Acceleration. Metadata When copying an -object, you can preserve all metadata (default) or specify new metadata. However, the ACL -is not preserved and is set to private for the user making the request. To override the -default ACL setting, specify a new ACL when generating a copy request. For more -information, see Using ACLs. To specify whether you want the object metadata copied from -the source object or replaced with metadata provided in the request, you can optionally add -the x-amz-metadata-directive header. When you grant permissions, you can use the -s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects -are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 -User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, -Resources, and Condition Keys for Amazon S3. x-amz-website-redirect-location is unique to -each object and must be specified in the request headers to copy the value. -x-amz-copy-source-if Headers To only copy an object under certain conditions, such as -whether the Etag matches or whether the object was modified before or after a specified -date, use the following request parameters: x-amz-copy-source-if-match -x-amz-copy-source-if-none-match x-amz-copy-source-if-unmodified-since -x-amz-copy-source-if-modified-since If both the x-amz-copy-source-if-match and -x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as -follows, Amazon S3 returns 200 OK and copies the data: x-amz-copy-source-if-match -condition evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates to -false If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since -headers are present in the request and evaluate as follows, Amazon S3 returns the 412 -Precondition Failed response code: x-amz-copy-source-if-none-match condition evaluates -to false x-amz-copy-source-if-modified-since condition evaluates to true All headers -with the x-amz- prefix, including x-amz-copy-source, must be signed. Server-side -encryption Amazon S3 automatically encrypts all new objects that are copied to an S3 -bucket. When copying an object, if you don't specify encryption information in your copy -request, the encryption setting of the target object is set to the default encryption -configuration of the destination bucket. By default, all buckets have a base level of -encryption configuration that uses server-side encryption with Amazon S3 managed keys -(SSE-S3). If the destination bucket has a default encryption configuration that uses -server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a -customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a -customer-provided key to encrypt the target object copy. 
When you perform a CopyObject -operation, if you want to use a different type of encryption setting for the target object, -you can use other appropriate encryption-related headers to encrypt the target object with -a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side -encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and -decrypts the data when you access it. If the encryption setting in your request is -different from the default encryption configuration of the destination bucket, the -encryption setting in your request takes precedence. If the source object for the copy is -stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in -your request so that Amazon S3 can decrypt the object for copying. For more information -about server-side encryption, see Using Server-Side Encryption. If a target object uses -SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon -S3 Bucket Keys in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request -Headers When copying an object, you can optionally use headers to grant ACL-based -permissions. By default, all objects are private. Only the owner has full access control. -When adding a new object, you can grant permissions to individual Amazon Web Services -accounts or to predefined groups defined by Amazon S3. These permissions are then added to -the ACL on the object. For more information, see Access Control List (ACL) Overview and -Managing ACLs Using the REST API. If the bucket that you're copying objects to uses the -bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer -affect permissions. Buckets that use this setting only accept PUT requests that don't -specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the -bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML -format. For more information, see Controlling ownership of objects and disabling ACLs in -the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for Object -Ownership, all objects written to the bucket by any account will be owned by the bucket -owner. Checksums When copying an object, if it has a checksum, that checksum will be -copied to the new object by default. When you copy the object over, you may optionally -specify a different checksum algorithm to use with the x-amz-checksum-algorithm header. -Storage Class Options You can use the CopyObject action to change the storage class of an -object that is already stored in Amazon S3 using the StorageClass parameter. For more -information, see Storage Classes in the Amazon S3 User Guide. If the source object's -storage class is GLACIER, you must restore a copy of this object before you can use it as a -source object for the copy operation. For more information, see RestoreObject. For more -information, see Copying Objects. Versioning By default, x-amz-copy-source identifies the -current version of an object to copy. If the current version is a delete marker, Amazon S3 -behaves as if the object was deleted. To copy a different version, use the versionId -subresource. If you enable versioning on the target bucket, Amazon S3 generates a unique -version ID for the object being copied. This version ID is different from the version ID of -the source object. Amazon S3 returns the version ID of the copied object in the -x-amz-version-id response header in the response. 
If you do not enable versioning or -suspend it on the target bucket, the version ID that Amazon S3 generates is always null. -The following operations are related to CopyObject: PutObject GetObject +Region that you specify for the destination object. The request can also result in a data +retrieval charge for the source if the source storage class bills for data retrieval. For +pricing information, see Amazon S3 pricing. Amazon S3 transfer acceleration does not +support cross-Region copies. If you request a cross-Region copy using a transfer +acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer +Acceleration. Metadata When copying an object, you can preserve all metadata (the +default) or specify new metadata. However, the access control list (ACL) is not preserved +and is set to private for the user making the request. To override the default ACL setting, +specify a new ACL when generating a copy request. For more information, see Using ACLs. To +specify whether you want the object metadata copied from the source object or replaced with +metadata provided in the request, you can optionally add the x-amz-metadata-directive +header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition +key to enforce certain metadata behavior when objects are uploaded. For more information, +see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of +Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon +S3. x-amz-website-redirect-location is unique to each object and must be specified in the +request headers to copy the value. x-amz-copy-source-if Headers To only copy an object +under certain conditions, such as whether the Etag matches or whether the object was +modified before or after a specified date, use the following request parameters: +x-amz-copy-source-if-match x-amz-copy-source-if-none-match +x-amz-copy-source-if-unmodified-since x-amz-copy-source-if-modified-since If both +the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are +present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the +data: x-amz-copy-source-if-match condition evaluates to true +x-amz-copy-source-if-unmodified-since condition evaluates to false If both the +x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present +in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed +response code: x-amz-copy-source-if-none-match condition evaluates to false +x-amz-copy-source-if-modified-since condition evaluates to true All headers with the +x-amz- prefix, including x-amz-copy-source, must be signed. Server-side encryption +Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When +copying an object, if you don't specify encryption information in your copy request, the +encryption setting of the target object is set to the default encryption configuration of +the destination bucket. By default, all buckets have a base level of encryption +configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). 
If the +destination bucket has a default encryption configuration that uses server-side encryption +with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with +Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided +encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided +key to encrypt the target object copy. When you perform a CopyObject operation, if you want +to use a different type of encryption setting for the target object, you can use other +appropriate encryption-related headers to encrypt the target object with a KMS key, an +Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 +encrypts your data as it writes your data to disks in its data centers and decrypts the +data when you access it. If the encryption setting in your request is different from the +default encryption configuration of the destination bucket, the encryption setting in your +request takes precedence. If the source object for the copy is stored in Amazon S3 using +SSE-C, you must provide the necessary encryption information in your request so that Amazon +S3 can decrypt the object for copying. For more information about server-side encryption, +see Using Server-Side Encryption. If a target object uses SSE-KMS, you can enable an S3 +Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 +User Guide. Access Control List (ACL)-Specific Request Headers When copying an object, +you can optionally use headers to grant ACL-based permissions. By default, all objects are +private. Only the owner has full access control. When adding a new object, you can grant +permissions to individual Amazon Web Services accounts or to predefined groups that are +defined by Amazon S3. These permissions are then added to the ACL on the object. For more +information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API. +If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 +Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this +setting only accept PUT requests that don't specify an ACL or PUT requests that specify +bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an +equivalent form of this ACL expressed in the XML format. For more information, see +Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. If your +bucket uses the bucket owner enforced setting for Object Ownership, all objects written to +the bucket by any account will be owned by the bucket owner. Checksums When copying an +object, if it has a checksum, that checksum will be copied to the new object by default. +When you copy the object over, you can optionally specify a different checksum algorithm to +use with the x-amz-checksum-algorithm header. Storage Class Options You can use the +CopyObject action to change the storage class of an object that is already stored in Amazon +S3 by using the StorageClass parameter. For more information, see Storage Classes in the +Amazon S3 User Guide. If the source object's storage class is GLACIER, you must restore a +copy of this object before you can use it as a source object for the copy operation. For +more information, see RestoreObject. For more information, see Copying Objects. Versioning + By default, x-amz-copy-source header identifies the current version of an object to copy. 
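A minimal usage sketch of the generated `copy_object` binding (bucket and key names are hypothetical; it assumes AWS.jl's high-level `@service S3` interface, the positional order `copy_object(Bucket, Key, x_amz_copy_source; ...)` that mirrors the # Arguments list below, and the convention of passing optional `x-amz-*` values through a `"headers"` entry in the trailing params dictionary):

```julia
using AWS
@service S3  # exposes the generated operations, e.g. S3.copy_object

# Copy an object and move the copy to a colder storage class; the metadata
# directive and storage class correspond to the headers discussed above.
S3.copy_object(
    "my-destination-bucket",                # destination bucket (hypothetical)
    "backups/report-2023.csv",              # destination key
    "my-source-bucket/reports/report.csv",  # x-amz-copy-source in "bucket/key" form
    Dict(
        "headers" => Dict(
            "x-amz-storage-class" => "STANDARD_IA",
            "x-amz-metadata-directive" => "COPY",
        ),
    ),
)
```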
+If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. +To copy a different version, use the versionId subresource. If you enable versioning on the +target bucket, Amazon S3 generates a unique version ID for the object being copied. This +version ID is different from the version ID of the source object. Amazon S3 returns the +version ID of the copied object in the x-amz-version-id response header in the response. If +you do not enable versioning or suspend it on the target bucket, the version ID that Amazon +S3 generates is always null. The following operations are related to CopyObject: +PutObject GetObject # Arguments - `bucket`: The name of the destination bucket. When using this action with an access @@ -314,7 +317,7 @@ The following operations are related to CopyObject: PutObject GetObject AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The key of the destination object. - `x-amz-copy-source`: Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object @@ -395,18 +398,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys object's Object Lock to expire. - `"x-amz-request-payer"`: - `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing - this object in Amazon S3 (for example, AES256, aws:kms). -- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the Amazon Web Services KMS - key ID to use for object encryption. All GET and PUT requests for an object protected by - Amazon Web Services KMS will fail if not made via SSL or using SigV4. For information about - configuring using any of the officially supported Amazon Web Services SDKs and Amazon Web - Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon - S3 User Guide. + this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). +- `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the KMS key ID to use for + object encryption. All GET and PUT requests for an object protected by KMS will fail if + they're not made via SSL or using SigV4. For information about configuring any of the + officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying + the Signature Version in Request Authentication in the Amazon S3 User Guide. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 + Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action + doesn’t affect bucket-level settings for S3 Bucket Key. 
- `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. @@ -495,44 +497,36 @@ Region. Accordingly, the signature calculations in Signature Version 4 must use as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, -see Virtual hosting of buckets. Access control lists (ACLs) When creating a bucket using -this operation, you can optionally configure the bucket ACL to specify the accounts or -groups that should be granted specific permissions on the bucket. If your CreateBucket -request sets bucket owner enforced for S3 Object Ownership and specifies a bucket ACL that +see Virtual hosting of buckets. Permissions In addition to s3:CreateBucket, the +following permissions are required when your CreateBucket request includes specific +headers: Access control lists (ACLs) - If your CreateBucket request specifies access +control list (ACL) permissions and the ACL is public-read, public-read-write, +authenticated-read, or if you specify access permissions explicitly through any other ACL, +both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the +CreateBucket request is private or if the request doesn't specify any ACLs, only +s3:CreateBucket permission is needed. Object Lock - If ObjectLockEnabledForBucket is +set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and +s3:PutBucketVersioning permissions are required. S3 Object Ownership - If your +CreateBucket request includes the x-amz-object-ownership header, then the +s3:PutBucketOwnershipControls permission is required. By default, ObjectOwnership is set to +BucketOWnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled, except in +uncommon use cases where you must control access for each object individually. If you want +to change the ObjectOwnership setting, you can use the x-amz-object-ownership header in +your CreateBucket request to set the ObjectOwnership setting of your choice. For more +information about S3 Object Ownership, see Controlling object ownership in the Amazon S3 +User Guide. S3 Block Public Access - If your specific use case requires granting public +access to your S3 resources, you can disable Block Public Access. You can create a new +bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock +API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. By +default, all Block Public Access settings are enabled for new buckets. To avoid inadvertent +exposure of your resources, we recommend keeping the S3 Block Public Access settings +enabled. For more information about S3 Block Public Access, see Blocking public access to +your Amazon S3 storage in the Amazon S3 User Guide. If your CreateBucket request +sets BucketOwnerEnforced for Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 -error and returns the InvalidBucketAclWithObjectOwnership error code. For more information, -see Controlling object ownership in the Amazon S3 User Guide. 
There are two ways to grant
-the appropriate permissions using the request headers. Specify a canned ACL using the
-x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned
-ACLs. Each canned ACL has a predefined set of grantees and permissions. For more
-information, see Canned ACL. Specify access permissions explicitly using the
-x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and
-x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3
-supports in an ACL. For more information, see Access control list (ACL) overview. You
-specify each grantee as a type=value pair, where the type is one of the following: id
-– if the value specified is the canonical user ID of an Amazon Web Services account
-uri – if you are granting permissions to a predefined group emailAddress – if the
-value specified is the email address of an Amazon Web Services account Using email
-addresses to specify a grantee is only supported in the following Amazon Web Services
-Regions: US East (N. Virginia) US West (N. California) US West (Oregon) Asia
-Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
-South America (São Paulo) For a list of all the Amazon S3 supported Regions and
-endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For
-example, the following x-amz-grant-read header grants the Amazon Web Services accounts
-identified by account IDs permissions to read object data and its metadata:
-x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" You can use either a canned
-ACL or specify access permissions explicitly. You cannot do both. Permissions In
-addition to s3:CreateBucket, the following permissions are required when your CreateBucket
-includes specific headers: ACLs - If your CreateBucket request specifies ACL permissions
-and the ACL is public-read, public-read-write, authenticated-read, or if you specify access
-permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl
-permissions are needed. If the ACL the CreateBucket request is private or doesn't specify
-any ACLs, only s3:CreateBucket permission is needed. Object Lock - If
-ObjectLockEnabledForBucket is set to true in your CreateBucket request,
-s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.
-S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership
-header, s3:PutBucketOwnershipControls permission is required. The following operations
-are related to CreateBucket: PutObject DeleteBucket
+error and returns the InvalidBucketAclWithObjectOwnership error code. For more information,
+see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide. The
+following operations are related to CreateBucket: PutObject DeleteBucket

# Arguments
- `bucket`: The name of the bucket to create.
@@ -691,7 +685,7 @@ AbortMultipartUpload ListParts ListMultipartUploads
 the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you
 use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the
 Outposts access point ARN in place of the bucket name. For more information about S3 on
- Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide.
+ Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
- `key`: Object key for which the multipart upload is to be initiated. 
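To make the CreateMultipartUpload arguments above concrete, a minimal sketch against the generated Julia bindings follows. The names are hypothetical; it assumes AWS.jl's `@service S3` interface, the generated positional orders `create_multipart_upload(Bucket, Key)` and `abort_multipart_upload(Bucket, Key, uploadId)`, and that the default parsed-XML response exposes the upload ID under `"UploadId"`.

```julia
using AWS
@service S3  # exposes the generated operations, e.g. S3.create_multipart_upload

# Initiate a multipart upload for a (hypothetical) large object.
resp = S3.create_multipart_upload("my-bucket", "videos/raw-footage.mov")
upload_id = resp["UploadId"]  # assumes the default parsed-XML response shape

# A real upload would now call S3.upload_part once per part and finish with
# S3.complete_multipart_upload; here the upload is aborted instead so that no
# incomplete parts are left behind.
S3.abort_multipart_upload("my-bucket", "videos/raw-footage.mov", upload_id)
```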
# Optional Parameters @@ -732,15 +726,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys this object in Amazon S3 (for example, AES256, aws:kms). - `"x-amz-server-side-encryption-aws-kms-key-id"`: Specifies the ID of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for - an object protected by Amazon Web Services KMS will fail if not made via SSL or using - SigV4. For information about configuring using any of the officially supported Amazon Web - Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request - Authentication in the Amazon S3 User Guide. + an object protected by KMS will fail if they're not made via SSL or using SigV4. For + information about configuring any of the officially supported Amazon Web Services SDKs and + Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in + the Amazon S3 User Guide. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with an object action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 + Bucket Key for object encryption with SSE-KMS. Specifying this header with an object action + doesn’t affect bucket-level settings for S3 Bucket Key. - `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. @@ -881,8 +875,8 @@ end Deletes the cors configuration information set for the bucket. To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others. For information about cors, -see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide. The following -operations are related to DeleteBucketCors: PutBucketCors RESTOPTIONSobject +see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide. Related Resources + PutBucketCors RESTOPTIONSobject # Arguments - `bucket`: Specifies the bucket whose cors configuration is being deleted. @@ -1402,7 +1396,7 @@ related to DeleteObject: PutObject the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Key name of the object to delete. # Optional Parameters @@ -1465,7 +1459,7 @@ DeleteObjectTagging: PutObjectTagging GetObjectTagging the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. 
For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The key that identifies the object in the bucket from which to remove all tags. # Optional Parameters @@ -1539,7 +1533,7 @@ ListParts AbortMultipartUpload AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `delete`: Container for the request. # Optional Parameters @@ -1657,6 +1651,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-request-payer"`: """ function get_bucket_accelerate_configuration( Bucket; aws_config::AbstractAWSConfig=global_aws_config() @@ -2662,58 +2657,60 @@ the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification. For more information about returning the ACL of an object, see GetObjectAcl. If the object you are retrieving is stored in the S3 Glacier -or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 -Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first -restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectState -error. For information about restoring archived objects, see Restoring Archived Objects. -Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET -requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side -encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these -types of keys, you’ll get an HTTP 400 BadRequest error. If you encrypt an object by using -server-side encryption with customer-provided encryption keys (SSE-C) when you store the -object in Amazon S3, then when you GET the object, you must use the following headers: -x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key - x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see -Server-Side Encryption (Using Customer-Provided Encryption Keys). Assuming you have the -relevant permission to read object tags, the response also returns the x-amz-tagging-count -header that provides the count of number of tags associated with the object. You can use -GetObjectTagging to retrieve the tag set associated with an object. Permissions You need -the relevant read object (or version) permission for this operation. For more information, -see Specifying Permissions in a Policy. If the object you request does not exist, the error -Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you -have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code -404 (\"no such key\") error. If you don’t have the s3:ListBucket permission, Amazon S3 -will return an HTTP status code 403 (\"access denied\") error. 
Versioning By default, -the GET action returns the current version of an object. To return a different version, use -the versionId subresource. If you supply a versionId, you need the s3:GetObjectVersion -permission to access a specific version of an object. If you request a specific version, -you do not need to have the s3:GetObject permission. If you request the current version -without a specific version ID, only s3:GetObject permission is required. -s3:GetObjectVersion permission won't be required. If the current version of the object is -a delete marker, Amazon S3 behaves as if the object was deleted and includes -x-amz-delete-marker: true in the response. For more information about versioning, see -PutBucketVersioning. Overriding Response Header Values There are times when you want to -override certain response header values in a GET response. For example, you might override -the Content-Disposition response header value in your GET request. You can override values -for a set of response headers using the following query parameters. These response header -values are sent only on a successful request, that is, when status code 200 OK is returned. -The set of headers you can override using these parameters is a subset of the headers that -Amazon S3 accepts when you create an object. The response headers that you can override for -the GET response are Content-Type, Content-Language, Expires, Cache-Control, -Content-Disposition, and Content-Encoding. To override these header values in the GET -response, you use the following request parameters. You must sign the request, either -using an Authorization header or a presigned URL, when using these parameters. They cannot -be used with an unsigned (anonymous) request. response-content-type -response-content-language response-expires response-cache-control -response-content-disposition response-content-encoding Overriding Response Header -Values If both of the If-Match and If-Unmodified-Since headers are present in the request -as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition -evaluates to false; then, S3 returns 200 OK and the data requested. If both of the -If-None-Match and If-Modified-Since headers are present in the request as follows: -If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to -true; then, S3 returns 304 Not Modified response code. For more information about -conditional requests, see RFC 7232. The following operations are related to GetObject: -ListBuckets GetObjectAcl +Flexible Retrieval or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering +Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object +you must first restore a copy using RestoreObject. Otherwise, this action returns an +InvalidObjectState error. For information about restoring archived objects, see Restoring +Archived Objects. Encryption request headers, like x-amz-server-side-encryption, should not +be sent for GET requests if your object uses server-side encryption with Key Management +Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services +KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys +(SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request +error. 
If you encrypt an object by using server-side encryption with customer-provided +encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the +object, you must use the following headers: +x-amz-server-side-encryption-customer-algorithm +x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 + For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +Encryption Keys). Assuming you have the relevant permission to read object tags, the +response also returns the x-amz-tagging-count header that provides the count of number of +tags associated with the object. You can use GetObjectTagging to retrieve the tag set +associated with an object. Permissions You need the relevant read object (or version) +permission for this operation. For more information, see Specifying Permissions in a +Policy. If the object that you request doesn’t exist, the error that Amazon S3 returns +depends on whether you also have the s3:ListBucket permission. If you have the +s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not +Found) error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +status code 403 (\"access denied\") error. Versioning By default, the GET action returns +the current version of an object. To return a different version, use the versionId +subresource. If you supply a versionId, you need the s3:GetObjectVersion permission to +access a specific version of an object. If you request a specific version, you do not need +to have the s3:GetObject permission. If you request the current version without a specific +version ID, only s3:GetObject permission is required. s3:GetObjectVersion permission won't +be required. If the current version of the object is a delete marker, Amazon S3 behaves +as if the object was deleted and includes x-amz-delete-marker: true in the response. For +more information about versioning, see PutBucketVersioning. Overriding Response Header +Values There are times when you want to override certain response header values in a GET +response. For example, you might override the Content-Disposition response header value in +your GET request. You can override values for a set of response headers using the following +query parameters. These response header values are sent only on a successful request, that +is, when status code 200 OK is returned. The set of headers you can override using these +parameters is a subset of the headers that Amazon S3 accepts when you create an object. The +response headers that you can override for the GET response are Content-Type, +Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To +override these header values in the GET response, you use the following request parameters. + You must sign the request, either using an Authorization header or a presigned URL, when +using these parameters. They cannot be used with an unsigned (anonymous) request. +response-content-type response-content-language response-expires +response-cache-control response-content-disposition response-content-encoding +Overriding Response Header Values If both of the If-Match and If-Unmodified-Since headers +are present in the request as follows: If-Match condition evaluates to true, and; +If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data +requested. 
If both of the If-None-Match and If-Modified-Since headers are present in the +request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since +condition evaluates to true; then, S3 returns 304 Not Modified response code. For more +information about conditional requests, see RFC 7232. The following operations are +related to GetObject: ListBuckets GetObjectAcl # Arguments - `bucket`: The bucket name containing the object. When using this action with an access @@ -2729,7 +2726,7 @@ ListBuckets GetObjectAcl AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Key of the object to get. # Optional Parameters @@ -2901,10 +2898,10 @@ GetObjectTagging HeadObject ListParts AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The object key. -- `x-amz-object-attributes`: An XML header that specifies the fields at the root level that - you want returned in the response. Fields that you do not specify are not returned. +- `x-amz-object-attributes`: Specifies the fields at the root level that you want returned + in the response. Fields that you do not specify are not returned. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3142,7 +3139,7 @@ to GetObjectTagging: DeleteObjectTagging GetObjectAttributes PutObjec the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which to get the tagging information. # Optional Parameters @@ -3307,7 +3304,7 @@ InvalidAccessPointAliasError, see List of Error Codes. AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3339,30 +3336,31 @@ Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve the exception beyond these error codes. 
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following -headers: x-amz-server-side-encryption-customer-algorithm -x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 -For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +headers: x-amz-server-side-encryption-customer-algorithm +x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 + For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys). Encryption request headers, like x-amz-server-side-encryption, should -not be sent for GET requests if your object uses server-side encryption with KMS keys -(SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If -your object does use these types of keys, you’ll get an HTTP 400 BadRequest error. The -last modified property in this case is the creation date of the object. Request headers -are limited to 8 KB in size. For more information, see Common Request Headers. Consider the -following when using request headers: Consideration 1 – If both of the If-Match and -If-Unmodified-Since headers are present in the request as follows: If-Match condition -evaluates to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon -S3 returns 200 OK and the data requested. Consideration 2 – If both of the -If-None-Match and If-Modified-Since headers are present in the request as follows: -If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates -to true; Then Amazon S3 returns the 304 Not Modified response code. For more +not be sent for GET requests if your object uses server-side encryption with Key Management +Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services +KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys +(SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 Bad Request +error. The last modified property in this case is the creation date of the object. +Request headers are limited to 8 KB in size. For more information, see Common Request +Headers. Consider the following when using request headers: Consideration 1 – If both +of the If-Match and If-Unmodified-Since headers are present in the request as follows: +If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to +false; Then Amazon S3 returns 200 OK and the data requested. Consideration 2 – If +both of the If-None-Match and If-Modified-Since headers are present in the request as +follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition +evaluates to true; Then Amazon S3 returns the 304 Not Modified response code. For more information about conditional requests, see RFC 7232. Permissions You need the relevant read object (or version) permission for this operation. For more information, see Actions, -resources, and condition keys for Amazon S3. If the object you request does not exist, the -error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If -you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code -404 (\"no such key\") error. If you don’t have the s3:ListBucket permission, Amazon S3 -returns an HTTP status code 403 (\"access denied\") error. 
The following actions are -related to HeadObject: GetObject GetObjectAttributes +resources, and condition keys for Amazon S3. If the object you request doesn't exist, the +error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. + If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status +code 404 error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an +HTTP status code 403 error. The following actions are related to HeadObject: +GetObject GetObjectAttributes # Arguments - `bucket`: The name of the bucket containing the object. When using this action with an @@ -3376,7 +3374,7 @@ related to HeadObject: GetObject GetObjectAttributes AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: The object key. # Optional Parameters @@ -3698,7 +3696,7 @@ UploadPart CompleteMultipartUpload ListParts AbortMultipartUpload the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3719,7 +3717,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys response. - `"prefix"`: Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different grouping of keys. (You can - think of using prefix to make groups in the same way you'd use a folder in a file system.) + think of using prefix to make groups in the same way that you'd use a folder in a file + system.) - `"upload-id-marker"`: Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key-marker might be @@ -3728,6 +3727,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-request-payer"`: """ function list_multipart_uploads(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3752,7 +3752,7 @@ end Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object -versions. To use this operation, you must have permissions to perform the +versions. To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. 
To use this operation, you must have READ access @@ -3772,20 +3772,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys response. - `"encoding-type"`: - `"key-marker"`: Specifies the key to start with when listing objects in a bucket. -- `"max-keys"`: Sets the maximum number of keys returned in the response. By default the +- `"max-keys"`: Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker. - `"prefix"`: Use this parameter to select only those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You - can think of using prefix to make groups in the same way you'd use a folder in a file + can think of using prefix to make groups in the same way that you'd use a folder in a file system.) You can use prefix with delimiter to roll up numerous objects into a single result under CommonPrefixes. - `"version-id-marker"`: Specifies the object version you want to start listing from. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-optional-object-attributes"`: Specifies the optional fields that you want + returned in the response. Fields that you do not specify are not returned. +- `"x-amz-request-payer"`: """ function list_object_versions(Bucket; aws_config::AbstractAWSConfig=global_aws_config()) return s3( @@ -3829,21 +3832,23 @@ CreateBucket ListBuckets AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"delimiter"`: A delimiter is a character you use to group keys. +- `"delimiter"`: A delimiter is a character that you use to group keys. - `"encoding-type"`: - `"marker"`: Marker is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. Marker can be any key in the bucket. -- `"max-keys"`: Sets the maximum number of keys returned in the response. By default the +- `"max-keys"`: Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. - `"prefix"`: Limits the response to keys that begin with the specified prefix. - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-optional-object-attributes"`: Specifies the optional fields that you want + returned in the response. Fields that you do not specify are not returned. 
- `"x-amz-request-payer"`: Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests. @@ -3868,16 +3873,17 @@ the request parameters as selection criteria to return a subset of the objects i A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in an ascending order of the respective key names in the list. For more information about -listing objects, see Listing object keys programmatically To use this operation, you must -have READ access to the bucket. To use this action in an Identity and Access Management -(IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket -owner has this permission by default and can grant this permission to others. For more -information about permissions, see Permissions Related to Bucket Subresource Operations and -Managing Access Permissions to Your Amazon S3 Resources. This section describes the latest -revision of this action. We recommend that you use this revised API for application -development. For backward compatibility, Amazon S3 continues to support the prior version -of this API, ListObjects. To get a list of your buckets, see ListBuckets. The following -operations are related to ListObjectsV2: GetObject PutObject CreateBucket +listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To +use this operation, you must have READ access to the bucket. To use this action in an +Identity and Access Management (IAM) policy, you must have permission to perform the +s3:ListBucket action. The bucket owner has this permission by default and can grant this +permission to others. For more information about permissions, see Permissions Related to +Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources +in the Amazon S3 User Guide. This section describes the latest revision of this action. We +recommend that you use this revised API operation for application development. For backward +compatibility, Amazon S3 continues to support the prior version of this API operation, +ListObjects. To get a list of your buckets, see ListBuckets. The following operations are +related to ListObjectsV2: GetObject PutObject CreateBucket # Arguments - `bucket`: Bucket name to list. When using this action with an access point, you must @@ -3890,18 +3896,19 @@ operations are related to ListObjectsV2: GetObject PutObject CreateBu the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"continuation-token"`: ContinuationToken indicates Amazon S3 that the list is being +- `"continuation-token"`: ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. -- `"delimiter"`: A delimiter is a character you use to group keys. +- `"delimiter"`: A delimiter is a character that you use to group keys. 
- `"encoding-type"`: Encoding type used by Amazon S3 to encode object keys in the response. -- `"fetch-owner"`: The owner field is not present in listV2 by default, if you want to - return owner field with each key in the result then set the fetch owner field to true. -- `"max-keys"`: Sets the maximum number of keys returned in the response. By default the +- `"fetch-owner"`: The owner field is not present in ListObjectsV2 by default. If you want + to return the owner field with each key in the result, then set the FetchOwner field to + true. +- `"max-keys"`: Sets the maximum number of keys returned in the response. By default, the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. - `"prefix"`: Limits the response to keys that begin with the specified prefix. @@ -3910,6 +3917,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden (access denied). +- `"x-amz-optional-object-attributes"`: Specifies the optional fields that you want + returned in the response. Fields that you do not specify are not returned. - `"x-amz-request-payer"`: Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in their requests. @@ -3966,7 +3975,7 @@ AbortMultipartUpload GetObjectAttributes ListMultipartUploads the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `upload_id`: Upload ID identifying the multipart upload whose parts are being listed. @@ -4108,12 +4117,12 @@ longer affect permissions. You must use policies to grant access to your bucket objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide. -Permissions You can set access permissions using one of the following methods: Specify a -canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, -known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. -Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot -use other access control-specific headers in your request. For more information, see Canned -ACL. Specify access permissions explicitly with the x-amz-grant-read, +Permissions You can set access permissions by using one of the following methods: +Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of +predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and +permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, +you cannot use other access control-specific headers in your request. For more information, +see Canned ACL. 
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these @@ -4367,18 +4376,20 @@ This action uses the encryption subresource to configure default encryption and Bucket Keys for an existing bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption -with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). If you -specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. -For information about bucket default encryption, see Amazon S3 bucket default encryption in -the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket -Keys in the Amazon S3 User Guide. This action requires Amazon Web Services Signature -Version 4. For more information, see Authenticating Requests (Amazon Web Services -Signature Version 4). To use this operation, you must have permissions to perform the -s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The -bucket owner can grant this permission to others. For more information about permissions, -see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to -Your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related -to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption +with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with +Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided +keys (SSE-C). If you specify default encryption by using SSE-KMS, you can also configure +Amazon S3 Bucket Keys. For information about bucket default encryption, see Amazon S3 +bucket default encryption in the Amazon S3 User Guide. For more information about S3 Bucket +Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. This action requires Amazon +Web Services Signature Version 4. For more information, see Authenticating Requests +(Amazon Web Services Signature Version 4). To use this operation, you must have +permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this +permission by default. The bucket owner can grant this permission to others. For more +information about permissions, see Permissions Related to Bucket Subresource Operations and +Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The +following operations are related to PutBucketEncryption: GetBucketEncryption +DeleteBucketEncryption # Arguments - `bucket`: Specifies default encryption for a bucket using server-side encryption with @@ -4690,23 +4701,23 @@ backward compatibility. For the related API description, see PutBucketLifecycle. You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the -following: Filter identifying a subset of objects to which the rule applies. 
The filter -can be based on a key name prefix, object tags, or a combination of both. Status whether -the rule is in effect. One or more lifecycle transition and expiration actions that you -want Amazon S3 to perform on the objects identified by the filter. If the state of your -bucket is versioning-enabled or versioning-suspended, you can have many versions of the -same object (one current version and zero or more noncurrent versions). Amazon S3 provides -predefined actions that you can specify for current and noncurrent object versions. For -more information, see Object Lifecycle Management and Lifecycle Configuration Elements. -Permissions By default, all Amazon S3 resources are private, including buckets, objects, -and related subresources (for example, lifecycle configuration and website configuration). -Only the resource owner (that is, the Amazon Web Services account that created it) can -access the resource. The resource owner can optionally grant access permissions to others -by writing an access policy. For this operation, a user must get the -s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. Explicit -deny also supersedes any other permissions. If you want to block users or accounts from -removing or deleting objects from your bucket, you must deny them permissions for the -following actions: s3:DeleteObject s3:DeleteObjectVersion +following: A filter identifying a subset of objects to which the rule applies. The filter +can be based on a key name prefix, object tags, or a combination of both. A status +indicating whether the rule is in effect. One or more lifecycle transition and expiration +actions that you want Amazon S3 to perform on the objects identified by the filter. If the +state of your bucket is versioning-enabled or versioning-suspended, you can have many +versions of the same object (one current version and zero or more noncurrent versions). +Amazon S3 provides predefined actions that you can specify for current and noncurrent +object versions. For more information, see Object Lifecycle Management and Lifecycle +Configuration Elements. Permissions By default, all Amazon S3 resources are private, +including buckets, objects, and related subresources (for example, lifecycle configuration +and website configuration). Only the resource owner (that is, the Amazon Web Services +account that created it) can access the resource. The resource owner can optionally grant +access permissions to others by writing an access policy. For this operation, a user must +get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. +An explicit deny also supersedes any other permissions. If you want to block users or +accounts from removing or deleting objects from your bucket, you must deny them permissions +for the following actions: s3:DeleteObject s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to PutBucketLifecycleConfiguration: Examples of Lifecycle Configuration @@ -4766,15 +4777,15 @@ log delivery uses the bucket owner enforced setting for S3 Object Ownership, you the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide. 
Grantee Values You can specify the person (grantee) to whom you're -assigning access rights (using request elements) in the following ways: By the person's -ID: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" +assigning access rights (by using request elements) in the following ways: By the +person's ID: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName>< -;>GranteesEmail<></DisplayName> </Grantee> DisplayName is optional +;>GranteesEmail<></DisplayName> </Grantee> DisplayName is optional and ignored in the request. By Email address: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<> ;</EmailAddress></Grantee> The grantee is resolved to the CanonicalUser and, -in a response to a GET Object acl request, appears as the CanonicalUser. By URI: +in a response to a GETObjectAcl request, appears as the CanonicalUser. By URI: <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/Authenticate dUsers<></URI></Grantee> To enable logging, you use LoggingEnabled @@ -4853,10 +4864,10 @@ Subresource Operations and Managing Access Permissions to Your Amazon S3 Resourc information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch. The following operations are related to PutBucketMetricsConfiguration: DeleteBucketMetricsConfiguration GetBucketMetricsConfiguration -ListBucketMetricsConfigurations GetBucketLifecycle has the following special error: -Error code: TooManyConfigurations Description: You are attempting to create a new -configuration but have already reached the 1,000-configuration limit. HTTP Status Code: -HTTP 400 Bad Request +ListBucketMetricsConfigurations PutBucketMetricsConfiguration has the following special +error: Error code: TooManyConfigurations Description: You are attempting to create a +new configuration but have already reached the 1,000-configuration limit. HTTP Status +Code: HTTP 400 Bad Request # Arguments - `bucket`: The name of the bucket for which the metrics configuration is set. @@ -4986,11 +4997,11 @@ disable notifications by adding the empty NotificationConfiguration element. For information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference. By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can -use a bucket policy to grant permission to other users to set this configuration with -s3:PutBucketNotification permission. The PUT notification is an atomic operation. For -example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda -function configurations. When you send a PUT request with this configuration, Amazon S3 -sends test messages to your SNS topic. If the message fails, the entire PUT action will +use a bucket policy to grant permission to other users to set this configuration with the +required s3:PutBucketNotification permission. The PUT notification is an atomic operation. +For example, suppose your notification configuration includes SNS topic, SQS queue, and +Lambda function configurations. When you send a PUT request with this configuration, Amazon +S3 sends test messages to your SNS topic. 
If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket. If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the @@ -5577,24 +5588,24 @@ successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging in your IAM permissions. The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the -Amazon S3 User Guide. You have three mutually exclusive options to protect data using +Amazon S3 User Guide. You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon -Web Services KMS keys (SSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts -data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You -can optionally tell Amazon S3 to encrypt data at by rest using server-side encryption with -other key options. For more information, see Using Server-Side Encryption. When adding a -new object, you can use headers to grant ACL-based permissions to individual Amazon Web -Services accounts or to predefined groups defined by Amazon S3. These permissions are then -added to the ACL on the object. By default, all objects are private. Only the owner has -full access control. For more information, see Access Control List (ACL) Overview and -Managing ACLs Using the REST API. If the bucket that you're uploading objects to uses the -bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer -affect permissions. Buckets that use this setting only accept PUT requests that don't -specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the -bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML -format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon -Web Services accounts) fail and return a 400 error with the error code +Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 +encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by +default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side +encryption with other key options. For more information, see Using Server-Side Encryption. +When adding a new object, you can use headers to grant ACL-based permissions to individual +Amazon Web Services accounts or to predefined groups defined by Amazon S3. These +permissions are then added to the ACL on the object. By default, all objects are private. +Only the owner has full access control. For more information, see Access Control List (ACL) +Overview and Managing ACLs Using the REST API. If the bucket that you're uploading objects +to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no +longer affect permissions. Buckets that use this setting only accept PUT requests that +don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as +the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the +XML format. 
PUT requests that contain other ACLs (for example, custom grants to certain +Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account @@ -5623,7 +5634,7 @@ GetBucketVersioning. For more information about related Amazon S3 APIs, see the the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the PUT action was initiated. # Optional Parameters @@ -5695,19 +5706,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - `"x-amz-server-side-encryption"`: The server-side encryption algorithm used when storing - this object in Amazon S3 (for example, AES256, aws:kms). + this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). - `"x-amz-server-side-encryption-aws-kms-key-id"`: If x-amz-server-side-encryption has a - valid value of aws:kms, this header specifies the ID of the Amazon Web Services Key - Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that - was used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not - provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services - managed key to protect the data. If the KMS key does not exist in the same account issuing - the command, you must use the full ARN and not just the ID. + valid value of aws:kms or aws:kms:dsse, this header specifies the ID of the Key Management + Service (KMS) symmetric encryption customer managed key that was used for the object. If + you specify x-amz-server-side-encryption:aws:kms or + x-amz-server-side-encryption:aws:kms:dsse, but do not provide + x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed + key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's + issuing the command, you must use the full ARN and not just the ID. - `"x-amz-server-side-encryption-bucket-key-enabled"`: Specifies whether Amazon S3 should - use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS - (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect - bucket-level settings for S3 Bucket Key. + use an S3 Bucket Key for object encryption with server-side encryption using Key Management + Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 + Bucket Key for object encryption with SSE-KMS. Specifying this header with a PUT action + doesn’t affect bucket-level settings for S3 Bucket Key. - `"x-amz-server-side-encryption-context"`: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. 
The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This @@ -5843,7 +5855,7 @@ CopyObject GetObject AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -6126,7 +6138,7 @@ PutObjectTagging: GetObjectTagging DeleteObjectTagging AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Name of the object key. - `tagging`: Container for the TagSet and Tag elements @@ -6286,67 +6298,67 @@ operation, you must have permissions to perform the s3:RestoreObject action. The owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. -Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval or S3 -Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 -Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the -S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first -initiate a restore request, and then wait until a temporary copy of the object is -available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 -Standard storage class in your S3 bucket. To access an archived object, you must restore -the object for the duration (number of days) that you specify. For objects in the Archive -Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a -restore request, and then wait until the object is moved into the Frequent Access tier. To -restore a specific object version, you can provide a version ID. If you don't provide a -version ID, Amazon S3 restores the current version. When restoring an archived object, you -can specify one of the following data access tier options in the Tier element of the -request body: Expedited - Expedited retrievals allow you to quickly access your data -stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive -tier when occasional urgent requests for restoring archives are required. For all but the -largest archived objects (250 MB+), data accessed using Expedited retrievals is typically -made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity -for Expedited retrievals is available when you need it. Expedited retrievals and -provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive -storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard -retrievals allow you to access any of your archived objects within several hours. 
This is -the default option for retrieval requests that do not specify the retrieval option. -Standard retrievals typically finish within 3–5 hours for objects stored in the S3 -Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They -typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage -class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects -stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the -S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to -retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically -finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage -class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost -retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish -within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 -Intelligent-Tiering Deep Archive tier. For more information about archive retrieval -options and provisioned capacity for Expedited data access, see Restoring Archived Objects -in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the -restore speed to a faster speed while it is in progress. For more information, see -Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the -status of object restoration, you can send a HEAD request. Operations return the -x-amz-restore header, which provides information about the restoration status, in the -response. You can use Amazon S3 event notifications to notify you when a restore is -initiated or completed. For more information, see Configuring Amazon S3 Event Notifications -in the Amazon S3 User Guide. After restoring an archived object, you can update the -restoration period by reissuing the request with a new period. Amazon S3 updates the -restoration period relative to the current time and charges only for the request-there are -no data transfer charges. You cannot update the restoration period when Amazon S3 is -actively processing your current restore request for the object. If your bucket has a -lifecycle configuration with a rule that includes an expiration action, the object -expiration overrides the life span that you specify in a restore request. For example, if -you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, -Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, -see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User -Guide. Responses A successful action returns either the 200 OK or 202 Accepted status -code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in -the response. If the object is previously restored, Amazon S3 returns 200 OK in the -response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore -is already in progress. (This error does not apply to SELECT type requests.) HTTP -Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: -GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not -available. Try again later. 
(Returned if there is insufficient capacity to process the +Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval +or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or +S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in +the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage +classes, you must first initiate a restore request, and then wait until a temporary copy of +the object is available. If you want a permanent copy of the object, create a copy of it in +the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you +must restore the object for the duration (number of days) that you specify. For objects in +the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first +initiate a restore request, and then wait until the object is moved into the Frequent +Access tier. To restore a specific object version, you can provide a version ID. If you +don't provide a version ID, Amazon S3 restores the current version. When restoring an +archived object, you can specify one of the following data access tier options in the Tier +element of the request body: Expedited - Expedited retrievals allow you to quickly +access your data stored in the S3 Glacier Flexible Retrieval storage +class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring +archives are required. For all but the largest archived objects (250 MB+), data accessed +using Expedited retrievals is typically made available within 1–5 minutes. Provisioned +capacity ensures that retrieval capacity for Expedited retrievals is available when you +need it. Expedited retrievals and provisioned capacity are not available for objects stored +in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. + Standard - Standard retrievals allow you to access any of your archived objects within +several hours. This is the default option for retrieval requests that do not specify the +retrieval option. Standard retrievals typically finish within 3–5 hours for objects +stored in the S3 Glacier Flexible Retrieval storage class or S3 +Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored +in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +Standard retrievals are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk +retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 +Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even +petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for +objects stored in the S3 Glacier Flexible Retrieval storage class or S3 +Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option +when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours +for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +Deep Archive tier. For more information about archive retrieval options and provisioned +capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User +Guide. You can use Amazon S3 restore speed upgrade to change the restore speed to a faster +speed while it is in progress. ␍
For more information, see Upgrading the speed of an +in-progress restore in the Amazon S3 User Guide. To get the status of object restoration, +you can send a HEAD request. Operations return the x-amz-restore header, which provides +information about the restoration status, in the response. You can use Amazon S3 event +notifications to notify you when a restore is initiated or completed. For more information, +see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide. After restoring +an archived object, you can update the restoration period by reissuing the request with a +new period. Amazon S3 updates the restoration period relative to the current time and +charges only for the request-there are no data transfer charges. You cannot update the +restoration period when Amazon S3 is actively processing your current restore request for +the object. If your bucket has a lifecycle configuration with a rule that includes an +expiration action, the object expiration overrides the life span that you specify in a +restore request. For example, if you restore an object copy for 10 days, but the object is +scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information +about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle +Management in Amazon S3 User Guide. Responses A successful action returns either the 200 +OK or 202 Accepted status code. If the object is not previously restored, then Amazon S3 +returns 202 Accepted in the response. If the object is previously restored, Amazon S3 +returns 200 OK in the response. Special errors: Code: RestoreAlreadyInProgress +Cause: Object restore is already in progress. (This error does not apply to SELECT type +requests.) HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client +Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently +not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The following operations are related to RestoreObject: PutBucketLifecycleConfiguration @@ -6364,7 +6376,7 @@ GetBucketNotificationConfiguration AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts - ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the action was initiated. # Optional Parameters @@ -6442,12 +6454,15 @@ information, see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), -you cannot specify the range of bytes of an object to return. GLACIER, DEEP_ARCHIVE and -REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or -REDUCED_REDUNDANCY storage classes. For more information, about storage classes see Storage -Classes in the Amazon S3 User Guide. 
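To make the retrieval tiers above concrete, here is a minimal sketch of requesting a Standard-tier restore with the generated restore_object wrapper; the "RestoreRequest" params key mirroring the RestoreRequest XML body is an assumption, and the bucket, key, and retention period are placeholders.

using AWS
@service S3

# Ask S3 to stage a temporary copy of an archived object for 7 days using the Standard tier.
S3.restore_object(
    "amzn-s3-demo-bucket",
    "logs/2023/archive.tar.gz",
    Dict("RestoreRequest" => Dict("Days" => 7, "GlacierJobParameters" => Dict("Tier" => "Standard"))),
)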
Special Errors For a list of special errors for -this operation, see List of SELECT Object Content Error Codes The following operations -are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration +you cannot specify the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE, +and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS +access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the +GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the +ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage +class. For more information about storage classes, see Using Amazon S3 storage classes in +the Amazon S3 User Guide. Special Errors For a list of special errors for this +operation, see List of SELECT Object Content Error Codes The following operations are +related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration # Arguments @@ -6608,7 +6623,7 @@ CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipart the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `part_number`: Part number of part being uploaded. This is a positive integer between 1 and 10,000. @@ -6754,7 +6769,7 @@ CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipart the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on - Outposts ARNs, see What is S3 on Outposts in the Amazon S3 User Guide. + Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - `key`: Object key for which the multipart upload was initiated. - `part_number`: Part number of part being copied. This is a positive integer between 1 and 10,000. diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl index 80526ac426..12d3df6c64 100644 --- a/src/services/sagemaker.jl +++ b/src/services/sagemaker.jl @@ -500,9 +500,14 @@ end create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn) create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn, params::Dict{String,<:Any}) -Creates an Autopilot job. Find the best-performing model after you run an Autopilot job by -calling DescribeAutoMLJob. For information about how to use Autopilot, see Automate Model -Development with Amazon SageMaker Autopilot. +Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. We +recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer +backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to +those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as +image or text classification. Find guidelines about how to migrate a CreateAutoMLJob to +CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. 
You can find the +best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 +(recommended) or DescribeAutoMLJob. # Arguments - `auto_mljob_name`: Identifies an Autopilot job. The name must be unique to your account @@ -519,9 +524,9 @@ Development with Amazon SageMaker Autopilot. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoMLJobConfig"`: A collection of settings used to configure an AutoML job. -- `"AutoMLJobObjective"`: Defines the objective metric used to measure the predictive - quality of an AutoML job. You provide an AutoMLJobObjectiveMetricName and Autopilot infers - whether to minimize or maximize it. For CreateAutoMLJobV2, only Accuracy is supported. +- `"AutoMLJobObjective"`: Specifies a metric to minimize or maximize as the objective of a + job. If not specified, the default objective metric depends on the problem type. See + AutoMLJobObjective for the default values. - `"GenerateCandidateDefinitionsOnly"`: Generates possible candidates without training the models. A candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings. @@ -584,18 +589,23 @@ end create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn) create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn, params::Dict{String,<:Any}) -Creates an Amazon SageMaker AutoML job that uses non-tabular data such as images or text -for Computer Vision or Natural Language Processing problems. Find the resulting model after -you run an AutoML job V2 by calling DescribeAutoMLJobV2. To create an AutoMLJob using -tabular data, see CreateAutoMLJob. This API action is callable through SageMaker Canvas -only. Calling it directly from the CLI or an SDK results in an error. +Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. +CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and +DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular +problem types identical to those of its previous version CreateAutoMLJob, as well as +non-tabular problem types such as image or text classification. Find guidelines about how +to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to +CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, +see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML +job V2 by calling DescribeAutoMLJobV2. # Arguments - `auto_mljob_input_data_config`: An array of channel objects describing the input data and - their location. Each channel is a named input source. Similar to InputDataConfig supported - by CreateAutoMLJob. The supported formats depend on the problem type: - ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile TextClassification: - S3Prefix + their location. Each channel is a named input source. Similar to the InputDataConfig + attribute in the CreateAutoMLJob input parameters. The supported formats depend on the + problem type: For tabular problem types: S3Prefix, ManifestFile. For image + classification: S3Prefix, ManifestFile, AugmentedManifestFile. For text classification: + S3Prefix. For time-series forecasting: S3Prefix. - `auto_mljob_name`: Identifies an Autopilot job. The name must be unique to your account and is case insensitive. 
- `auto_mlproblem_type_config`: Defines the configuration settings of one of the supported @@ -607,13 +617,16 @@ only. Calling it directly from the CLI or an SDK results in an error. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoMLJobObjective"`: Specifies a metric to minimize or maximize as the objective of a - job. For CreateAutoMLJobV2, only Accuracy is supported. + job. If not specified, the default objective metric depends on the problem type. For the + list of default values per problem type, see AutoMLJobObjective. For tabular problem + types, you must either provide both the AutoMLJobObjective and indicate the type of + supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or + none at all. - `"DataSplitConfig"`: This structure specifies how to split the data into train and - validation datasets. If you are using the V1 API (for example CreateAutoMLJob) or the V2 - API for Natural Language Processing problems (for example CreateAutoMLJobV2 with a - TextClassificationJobConfig problem type), the validation and training datasets must - contain the same headers. Also, for V1 API jobs, the validation dataset must be less than 2 - GB in size. + validation datasets. The validation and training datasets must contain the same headers. + For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB + in size. This attribute must not be set for the time-series forecasting problem type, as + Autopilot automatically splits the input dataset into training and validation sets. - `"ModelDeployConfig"`: Specifies how to generate the endpoint name for an automatic one-click Autopilot model deployment. - `"SecurityConfig"`: The security configuration for traffic encryption or Amazon VPC @@ -6257,7 +6270,8 @@ end describe_auto_mljob(auto_mljob_name) describe_auto_mljob(auto_mljob_name, params::Dict{String,<:Any}) -Returns information about an Amazon SageMaker AutoML job. +Returns information about an AutoML job created by calling CreateAutoMLJob. AutoML jobs +created by calling CreateAutoMLJobV2 cannot be described by DescribeAutoMLJob. # Arguments - `auto_mljob_name`: Requests information about an AutoML job using its unique name. @@ -6292,12 +6306,11 @@ end describe_auto_mljob_v2(auto_mljob_name) describe_auto_mljob_v2(auto_mljob_name, params::Dict{String,<:Any}) -Returns information about an Amazon SageMaker AutoML V2 job. This API action is callable -through SageMaker Canvas only. Calling it directly from the CLI or an SDK results in an -error. +Returns information about an AutoML job created by calling CreateAutoMLJobV2 or +CreateAutoMLJob. # Arguments -- `auto_mljob_name`: Requests information about an AutoML V2 job using its unique name. +- `auto_mljob_name`: Requests information about an AutoML job V2 using its unique name. """ function describe_auto_mljob_v2( @@ -6785,7 +6798,8 @@ Use this operation to describe a FeatureGroup. The response includes information creation time, FeatureGroup name, the unique identifier for each FeatureGroup, and more. # Arguments -- `feature_group_name`: The name of the FeatureGroup you want described. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the FeatureGroup you want + described. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -6826,7 +6840,8 @@ end Shows the metadata for a feature within a feature group. 
# Arguments -- `feature_group_name`: The name of the feature group containing the feature. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group + containing the feature. - `feature_name`: The name of the feature. """ @@ -10970,6 +10985,42 @@ function list_projects( ) end +""" + list_resource_catalogs() + list_resource_catalogs(params::Dict{String,<:Any}) + + Lists Amazon SageMaker Catalogs based on given filters and orders. The maximum number of +ResourceCatalogs viewable is 1000. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Use this parameter to search for ResourceCatalogs created after a + specific date and time. +- `"CreationTimeBefore"`: Use this parameter to search for ResourceCatalogs created before + a specific date and time. +- `"MaxResults"`: The maximum number of results returned by ListResourceCatalogs. +- `"NameContains"`: A string that partially matches one or more ResourceCatalogs names. + Filters ResourceCatalog by name. +- `"NextToken"`: A token to resume pagination of ListResourceCatalogs results. +- `"SortBy"`: The value on which the resource catalog list is sorted. +- `"SortOrder"`: The order in which the resource catalogs are listed. +""" +function list_resource_catalogs(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListResourceCatalogs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_resource_catalogs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListResourceCatalogs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_spaces() list_spaces(params::Dict{String,<:Any}) @@ -11739,6 +11790,12 @@ Resources Reference for more information. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CrossAccountFilterOption"`: A cross account filter option. When the value is + \"CrossAccount\" the search results will only include resources made discoverable to you + from other accounts. When the value is \"SameAccount\" or null the search results will only + include resources from your account. Default is null. For more information on searching for + resources made discoverable to your account, see Search discoverable resources in the + SageMaker Developer Guide. The maximum number of ResourceCatalogs viewable is 1000. - `"MaxResults"`: The maximum number of results to return. - `"NextToken"`: If more than MaxResults resources match the specified SearchExpression, the response includes a NextToken. The NextToken can be passed to the next SearchRequest to @@ -13193,10 +13250,19 @@ end update_feature_group(feature_group_name) update_feature_group(feature_group_name, params::Dict{String,<:Any}) -Updates the feature group. +Updates the feature group by either adding features or updating the online store +configuration. Use one of the following request parameters at a time while using the +UpdateFeatureGroup API. You can add features for your feature group using the +FeatureAdditions request parameter. Features cannot be removed from a feature group. You +can update the online store configuration by using the OnlineStoreConfig request parameter. +If a TtlDuration is specified, the default TtlDuration applies for all records added to the +feature group after the feature group is updated. 
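The new ListResourceCatalogs wrapper above follows the usual generated call pattern; a small usage sketch with illustrative filter values (the @service SageMaker module mapping is assumed):

using AWS
@service SageMaker

# List up to 10 resource catalogs whose names contain "AWS", newest first.
resp = SageMaker.list_resource_catalogs(
    Dict("NameContains" => "AWS", "MaxResults" => 10, "SortOrder" => "Descending")
)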
If a record level TtlDuration exists from +using the PutRecord API, the record level TtlDuration applies to that record instead of the +default TtlDuration. # Arguments -- `feature_group_name`: The name of the feature group that you're updating. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group that + you're updating. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -13204,6 +13270,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys asynchronous operation. When you get an HTTP 200 response, you've made a valid request. It takes some time after you've made a valid request for Feature Store to update the feature group. +- `"OnlineStoreConfig"`: Updates the feature group online store configuration. """ function update_feature_group( FeatureGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -13239,8 +13306,8 @@ end Updates the description and parameters of the feature group. # Arguments -- `feature_group_name`: The name of the feature group containing the feature that you're - updating. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group + containing the feature that you're updating. - `feature_name`: The name of the feature that you're updating. # Optional Parameters diff --git a/src/services/sagemaker_featurestore_runtime.jl b/src/services/sagemaker_featurestore_runtime.jl index a37491edb9..5378c46f7e 100644 --- a/src/services/sagemaker_featurestore_runtime.jl +++ b/src/services/sagemaker_featurestore_runtime.jl @@ -11,9 +11,15 @@ using AWS.UUIDs Retrieves a batch of Records from a FeatureGroup. # Arguments -- `identifiers`: A list of FeatureGroup names, with their corresponding RecordIdentifier - value, and Feature name that have been requested to be retrieved in batch. +- `identifiers`: A list containing the name or Amazon Resource Name (ARN) of the + FeatureGroup, the list of names of Features to be retrieved, and the corresponding + RecordIdentifier values as strings. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExpirationTimeResponse"`: Parameter to request ExpiresAt in response. If Enabled, + BatchGetRecord will return the value of ExpiresAt, if it is not null. If Disabled and null, + BatchGetRecord will return null. """ function batch_get_record(Identifiers; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker_featurestore_runtime( @@ -45,22 +51,23 @@ end delete_record(event_time, feature_group_name, record_identifier_value_as_string, params::Dict{String,<:Any}) Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both -SOFT_DELETE and HARD_DELETE. For SOFT_DELETE (default), feature columns are set to null and -the record is no longer retrievable by GetRecord or BatchGetRecord. For HARD_DELETE, the +SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and +the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the deleted record marker to the OfflineStore with feature values set to null, is_deleted value set to True, and EventTime set to the delete input EventTime. Note that the EventTime specified in DeleteRecord should be set later than the EventTime of the existing record in the OnlineStore for that RecordIdentifer. 
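As a sketch of the new OnlineStoreConfig parameter described above, the call below sets a default time to live on an existing feature group; the two-argument params method and the TtlDuration shape (Unit and Value fields) are assumptions, and the group name is a placeholder.

using AWS
@service SageMaker

# Give records written after this update a default TTL of 30 days in the online store.
SageMaker.update_feature_group(
    "my-feature-group",
    Dict("OnlineStoreConfig" => Dict("TtlDuration" => Dict("Unit" => "Days", "Value" => 30))),
)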
If it is not, the deletion does not occur: For -SOFT_DELETE, the existing (undeleted) record remains in the OnlineStore, though the delete -record marker is still written to the OfflineStore. HARD_DELETE returns EventTime: 400 +SoftDelete, the existing (undeleted) record remains in the OnlineStore, though the delete +record marker is still written to the OfflineStore. HardDelete returns EventTime: 400 ValidationException to indicate that the delete operation failed. No delete record marker is written to the OfflineStore. # Arguments - `event_time`: Timestamp indicating when the deletion event occurred. EventTime can be used to query data at a certain point in time. -- `feature_group_name`: The name of the feature group to delete the record from. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group to + delete the record from. - `record_identifier_value_as_string`: The value for the RecordIdentifier that uniquely identifies the record, in string format. @@ -123,13 +130,16 @@ OnlineStore can be retrieved. If no Record with RecordIdentifierValue is found, empty result is returned. # Arguments -- `feature_group_name`: The name of the feature group from which you want to retrieve a - record. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group from + which you want to retrieve a record. - `record_identifier_value_as_string`: The value that corresponds to RecordIdentifier type and uniquely identifies the record in the FeatureGroup. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExpirationTimeResponse"`: Parameter to request ExpiresAt in response. If Enabled, + GetRecord will return the value of ExpiresAt, if it is not null. If Disabled and null, + GetRecord will return null. - `"FeatureName"`: List of names of Features to be retrieved. If not specified, the latest value for all the Features are returned. """ @@ -173,14 +183,21 @@ end put_record(feature_group_name, record) put_record(feature_group_name, record, params::Dict{String,<:Any}) -Used for data ingestion into the FeatureStore. The PutRecord API writes to both the -OnlineStore and OfflineStore. If the record is the latest record for the recordIdentifier, -the record is written to both the OnlineStore and OfflineStore. If the record is a historic -record, it is written only to the OfflineStore. +The PutRecord API is used to ingest a list of Records into your feature group. If a new +record’s EventTime is greater, the new record is written to both the OnlineStore and +OfflineStore. Otherwise, the record is a historic record and it is written only to the +OfflineStore. You can specify the ingestion to be applied to the OnlineStore, +OfflineStore, or both by using the TargetStores request parameter. You can set the +ingested record to expire at a given time to live (TTL) duration after the record’s event +time, ExpiresAt = EventTime + TtlDuration, by specifying the TtlDuration parameter. A +record level TtlDuration is set when specifying the TtlDuration parameter using the +PutRecord API call. If the input TtlDuration is null or unspecified, TtlDuration is set to +the default feature group level TtlDuration. A record level TtlDuration supersedes the +group level TtlDuration. # Arguments -- `feature_group_name`: The name of the feature group that you want to insert the record - into. +- `feature_group_name`: The name or Amazon Resource Name (ARN) of the feature group that + you want to insert the record into. 
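A sketch of reading a record back with the new ExpirationTimeResponse flag documented above; the positional order (feature group, then record identifier) is assumed from the docstring, the Feature Store runtime module name is assumed to map to src/services/sagemaker_featurestore_runtime.jl, and the identifiers are placeholders.

using AWS
@service Sagemaker_Featurestore_Runtime

resp = Sagemaker_Featurestore_Runtime.get_record(
    "my-feature-group",      # feature group name or ARN (placeholder)
    "customer-42",           # RecordIdentifier value (placeholder)
    Dict("ExpirationTimeResponse" => "Enabled"),   # also return ExpiresAt when a TTL is set
)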
- `record`: List of FeatureValues to be inserted. This will be a full over-write. If you only want to update few of the feature values, do the following: Use GetRecord to retrieve the latest record. Update the record returned from GetRecord. Use PutRecord @@ -190,6 +207,9 @@ record, it is written only to the OfflineStore. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"TargetStores"`: A list of stores to which you're adding the record. By default, Feature Store adds the record to all of the stores that you're using for the FeatureGroup. +- `"TtlDuration"`: Time to live duration, where the record is hard deleted after the + expiration time is reached; ExpiresAt = EventTime + TtlDuration. For information on + HardDelete, see the DeleteRecord API in the Amazon SageMaker API Reference guide. """ function put_record( FeatureGroupName, Record; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/secrets_manager.jl b/src/services/secrets_manager.jl index 718c6b50c4..e98d4eabe9 100644 --- a/src/services/secrets_manager.jl +++ b/src/services/secrets_manager.jl @@ -1149,8 +1149,10 @@ except SecretBinary or SecretString because it might be logged. For more informa Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer -managed key, you must also have kms:GenerateDataKey and kms:Decrypt permissions on the key. -For more information, see Secret encryption and decryption. +managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt +permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission +to the new key, Secrets Manager does not re-encrypt existing secret versions with the new +key. For more information, see Secret encryption and decryption. # Arguments - `secret_id`: The ARN or name of the secret. For an ARN, we recommend that you specify a @@ -1170,19 +1172,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Description"`: The description of the secret. - `"KmsKeyId"`: The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels - AWSCURRENT, AWSPENDING, or AWSPREVIOUS. For more information about versions and staging - labels, see Concepts: Version. A key alias is always prefixed by alias/, for example - alias/aws/secretsmanager. For more information, see About aliases. If you set this to an - empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. - If this key doesn't already exist in your account, then Secrets Manager creates it for you - automatically. All users and roles in the Amazon Web Services account automatically have - access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time - significant delay in returning the result. You can only use the Amazon Web Services - managed key aws/secretsmanager if you call this operation using credentials from the same - Amazon Web Services account that owns the secret. If the secret is in a different account, - then you must use a customer managed key and provide the ARN of that KMS key in this field. - The user making the call must have permissions to both the secret and the KMS key in their - respective accounts. + AWSCURRENT, AWSPENDING, or AWSPREVIOUS. ␍
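Returning to the Feature Store runtime changes above, this is a rough sketch of ingesting a record with a record-level TTL via the generated put_record method; the FeatureName/ValueAsString record layout and the TtlDuration shape are assumptions, and every value is a placeholder.

using AWS
@service Sagemaker_Featurestore_Runtime

record = [
    Dict("FeatureName" => "customer_id", "ValueAsString" => "customer-42"),
    Dict("FeatureName" => "plan", "ValueAsString" => "premium"),
    Dict("FeatureName" => "event_time", "ValueAsString" => "2024-01-01T00:00:00Z"),
]
# Record-level TTL: hard delete this record 90 days after its event time,
# overriding the feature group's default TtlDuration.
Sagemaker_Featurestore_Runtime.put_record(
    "my-feature-group",
    record,
    Dict("TtlDuration" => Dict("Unit" => "Days", "Value" => 90)),
)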
If you don't have kms:Encrypt permission to the new + key, Secrets Manager does not re-encrypt existing secret versions with the new key. For more + information about versions and staging labels, see Concepts: Version. A key alias is always + prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About + aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services + managed key aws/secretsmanager. If this key doesn't already exist in your account, then + Secrets Manager creates it for you automatically. All users and roles in the Amazon Web + Services account automatically have access to use aws/secretsmanager. Creating + aws/secretsmanager can result in a one-time significant delay in returning the result. + You can only use the Amazon Web Services managed key aws/secretsmanager if you call this + operation using credentials from the same Amazon Web Services account that owns the secret. + If the secret is in a different account, then you must use a customer managed key and + provide the ARN of that KMS key in this field. The user making the call must have + permissions to both the secret and the KMS key in their respective accounts. - `"SecretBinary"`: The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. diff --git a/src/services/securityhub.jl b/src/services/securityhub.jl index 804614c910..04a0dfe83e 100644 --- a/src/services/securityhub.jl +++ b/src/services/securityhub.jl @@ -653,8 +653,9 @@ end - `actions`: One or more actions to update finding fields if a finding matches the conditions specified in Criteria. - `criteria`: A set of ASFF finding field attributes and corresponding expected values - that Security Hub uses to filter findings. If a finding matches the conditions specified in - this parameter, Security Hub applies the rule action to the finding. + that Security Hub uses to filter findings. If a rule is enabled and a finding matches the + conditions specified in this parameter, Security Hub applies the rule action to the + finding. - `description`: A description of the rule. - `rule_name`: The name of the rule. - `rule_order`: An integer ranging from 1 to 1000 that represents the order in which the @@ -665,14 +666,13 @@ end Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IsTerminal"`: Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria - for multiple rules, and each rule has different actions. If the value of this field is set - to true for a rule, Security Hub applies the rule action to a finding that matches the rule - criteria and won't evaluate other rules for the finding. The default value of this field is - false. + for multiple rules, and each rule has different actions. If a rule is terminal, Security + Hub applies the rule action to a finding that matches the rule criteria and doesn't + evaluate other rules for the finding. By default, a rule isn't terminal. - `"RuleStatus"`: Whether the rule is active after it is created. If this parameter is - equal to Enabled, Security Hub will apply the rule to findings and finding updates after - the rule is created. To change the value of this parameter after creating a rule, use - BatchUpdateAutomationRules. ␍
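Given the kms:Encrypt requirement spelled out above, moving a secret onto a customer managed key could look like the following sketch; the @service Secrets_Manager mapping and the update_secret(secret_id, params) form are assumed from the generated pattern, and the secret name and key alias are placeholders.

using AWS
@service Secrets_Manager

# The caller needs kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt on the new key;
# existing versions are not re-encrypted without kms:Encrypt.
Secrets_Manager.update_secret(
    "prod/db-password",                      # secret name or ARN (placeholder)
    Dict("KmsKeyId" => "alias/my-app-key"),  # customer managed key alias (placeholder)
)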
+ equal to ENABLED, Security Hub starts applying the rule to findings and finding updates + after the rule is created. To change the value of this parameter after creating a rule, use + BatchUpdateAutomationRules . - `"Tags"`: User-defined tags that help you label the purpose of a rule. """ function create_automation_rule( diff --git a/src/services/securitylake.jl b/src/services/securitylake.jl index 6912d4beb9..b0e2a262da 100644 --- a/src/services/securitylake.jl +++ b/src/services/securitylake.jl @@ -12,7 +12,7 @@ Adds a natively supported Amazon Web Service as an Amazon Security Lake source. source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web -Service as a source, Security Lake starts collecting logs and events from it, You can use +Service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source. @@ -107,17 +107,17 @@ end Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before -enabling log collection in Regions. By default, the CreateDataLake Security Lake in all -Regions. To specify particular Regions, configure these Regions using the configurations -parameter. If you have already enabled Security Lake in a Region when you call this -command, the command will update the Region if you provide new configuration parameters. If -you have not already enabled Security Lake in the Region when you call this API, it will -set up the data lake in the Region with the specified configurations. When you enable -Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This -includes ingesting security data from sources, storing data, and making data accessible to -subscribers. Security Lake also enables all the existing settings and resources that it -stores or maintains for your Amazon Web Services account in the current Region, including -security log and event data. For more information, see the Amazon Security Lake User Guide. +enabling log collection in Regions. To specify particular Regions, configure these Regions +using the configurations parameter. If you have already enabled Security Lake in a Region +when you call this command, the command will update the Region if you provide new +configuration parameters. If you have not already enabled Security Lake in the Region when +you call this API, it will set up the data lake in the Region with the specified +configurations. When you enable Security Lake, it starts ingesting security data after the +CreateAwsLogSource call. This includes ingesting security data from sources, storing data, +and making data accessible to subscribers. Security Lake also enables all the existing +settings and resources that it stores or maintains for your Amazon Web Services account in +the current Region, including security log and event data. For more information, see the +Amazon Security Lake User Guide. # Arguments - `configurations`: Specify the Region or Regions that will contribute data to the rollup @@ -126,6 +126,11 @@ security log and event data. For more information, see the Amazon Security Lake the Glue table. 
This table contains partitions generated by the ingestion and normalization of Amazon Web Services log sources and custom sources. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"tags"`: An array of objects, one for each tag to associate with the data lake + configuration. For each tag, you must specify both a tag key and a tag value. A tag value + cannot be null, but it can be an empty string. """ function create_data_lake( configurations, @@ -286,6 +291,9 @@ Region. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"accessTypes"`: The Amazon S3 or Lake Formation access type. - `"subscriberDescription"`: The description for your subscriber account in Security Lake. +- `"tags"`: An array of objects, one for each tag to associate with the subscriber. For + each tag, you must specify both a tag key and a tag value. A tag value cannot be null, but + it can be an empty string. """ function create_subscriber( sources, @@ -521,15 +529,15 @@ end delete_data_lake_organization_configuration(auto_enable_new_account) delete_data_lake_organization_configuration(auto_enable_new_account, params::Dict{String,<:Any}) -Removes automatic the enablement of configuration settings for new member accounts (but -retains the settings for the delegated administrator) from Amazon Security Lake. You must -run this API using the credentials of the delegated administrator. When you run this API, -new member accounts that are added after the organization enables Security Lake won't -contribute to the data lake. +Turns off automatic enablement of Amazon Security Lake for member accounts that are added +to an organization in Organizations. Only the delegated Security Lake administrator for an +organization can perform this operation. If the delegated Security Lake administrator +performs this operation, new member accounts won't automatically contribute data to the +data lake. # Arguments -- `auto_enable_new_account`: Removes the automatic enablement of configuration settings for - new member accounts in Security Lake. +- `auto_enable_new_account`: Turns off automatic enablement of Security Lake for member + accounts that are added to an organization. """ function delete_data_lake_organization_configuration( @@ -839,8 +847,8 @@ end list_data_lakes(params::Dict{String,<:Any}) Retrieves the Amazon Security Lake configuration object for the specified Amazon Web -Services account ID. You can use the ListDataLakes API to know whether Security Lake is -enabled for any region. +Services Regions. You can use this operation to determine whether Security Lake is enabled +for a Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -930,6 +938,43 @@ function list_subscribers( ) end +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Retrieves the tags (keys and values) that are associated with an Amazon Security Lake +resource: a subscriber, or the data lake configuration for your Amazon Web Services account +in a particular Amazon Web Services Region. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Security Lake resource to + retrieve the tags for. 
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return securitylake( + "GET", + "/v1/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securitylake( + "GET", + "/v1/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ register_data_lake_delegated_administrator(account_id) register_data_lake_delegated_administrator(account_id, params::Dict{String,<:Any}) @@ -970,6 +1015,93 @@ function register_data_lake_delegated_administrator( ) end +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds or updates one or more tags that are associated with an Amazon Security Lake resource: +a subscriber, or the data lake configuration for your Amazon Web Services account in a +particular Amazon Web Services Region. A tag is a label that you can define and associate +with Amazon Web Services resources. Each tag consists of a required tag key and an +associated tag value. A tag key is a general label that acts as a category for a more +specific tag value. A tag value acts as a descriptor for a tag key. Tags can help you +identify, categorize, and manage resources in different ways, such as by owner, +environment, or other criteria. For more information, see Tagging Amazon Security Lake +resources in the Amazon Security Lake User Guide. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Security Lake resource to + add or update the tags for. +- `tags`: An array of objects, one for each tag (key and value) to associate with the + Amazon Security Lake resource. For each tag, you must specify both a tag key and a tag + value. A tag value cannot be null, but it can be an empty string. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return securitylake( + "POST", + "/v1/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securitylake( + "POST", + "/v1/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes one or more tags (keys and values) from an Amazon Security Lake resource: a +subscriber, or the data lake configuration for your Amazon Web Services account in a +particular Amazon Web Services Region. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Security Lake resource to + remove one or more tags from. +- `tag_keys`: A list of one or more tag keys. For each value in the list, specify the tag + key for a tag to remove from the Amazon Security Lake resource. 
+ +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return securitylake( + "DELETE", + "/v1/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return securitylake( + "DELETE", + "/v1/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_data_lake(configurations) update_data_lake(configurations, params::Dict{String,<:Any}) diff --git a/src/services/sfn.jl b/src/services/sfn.jl index 9a5f6e5287..1c915acb96 100644 --- a/src/services/sfn.jl +++ b/src/services/sfn.jl @@ -62,14 +62,16 @@ Creates a state machine. A state machine consists of a collection of states that work (Task states), determine to which states to transition next (Choice states), stop an execution with an error (Fail states), and so on. State machines are specified using a JSON-based, structured language. For more information, see Amazon States Language in the -Step Functions User Guide. This operation is eventually consistent. The results are best -effort and may not reflect very recent updates and changes. CreateStateMachine is an -idempotent API. Subsequent requests won’t create a duplicate resource if it was already -created. CreateStateMachine's idempotency check is based on the state machine name, -definition, type, LoggingConfiguration and TracingConfiguration. If a following request has -a different roleArn or tags, Step Functions will ignore these differences and treat it as -an idempotent request of the previous. In this case, roleArn and tags will not be updated, -even if they are different. +Step Functions User Guide. If you set the publish parameter of this API action to true, it +publishes version 1 as the first revision of the state machine. This operation is +eventually consistent. The results are best effort and may not reflect very recent updates +and changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create +a duplicate resource if it was already created. CreateStateMachine's idempotency check is +based on the state machine name, definition, type, LoggingConfiguration, and +TracingConfiguration. The check is also based on the publish and versionDescription +parameters. If a following request has a different roleArn or tags, Step Functions will +ignore these differences and treat it as an idempotent request of the previous. In this +case, roleArn and tags will not be updated, even if they are different. # Arguments - `definition`: The Amazon States Language definition of the state machine. See Amazon @@ -85,6 +87,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"loggingConfiguration"`: Defines what execution history events are logged and where they are logged. By default, the level is set to OFF. For more information see Log Levels in the Step Functions User Guide. +- `"publish"`: Set to true to publish the first version of the state machine during + creation. The default is false. - `"tags"`: Tags to be added when creating a state machine. An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide, and Controlling Access Using IAM Tags. 
Tags may only contain @@ -92,6 +96,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tracingConfiguration"`: Selects whether X-Ray tracing is enabled. - `"type"`: Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created. +- `"versionDescription"`: Sets description about the state machine version. You can only + set the description if the publish parameter is set to true. Otherwise, if you set + versionDescription, but publish to false, this API action throws ValidationException. """ function create_state_machine( definition, name, roleArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -126,6 +133,72 @@ function create_state_machine( ) end +""" + create_state_machine_alias(name, routing_configuration) + create_state_machine_alias(name, routing_configuration, params::Dict{String,<:Any}) + +Creates an alias for a state machine that points to one or two versions of the same state +machine. You can set your application to call StartExecution with an alias and update the +version the alias uses without changing the client's code. You can also map an alias to +split StartExecution requests between two versions of a state machine. To do this, add a +second RoutingConfig object in the routingConfiguration parameter. You must also specify +the percentage of execution run requests each version should receive in both RoutingConfig +objects. Step Functions randomly chooses which version runs a given execution based on the +percentage you specify. To create an alias that points to a single version, specify a +single RoutingConfig object with a weight set to 100. You can create up to 100 aliases for +each state machine. You must delete unused aliases using the DeleteStateMachineAlias API +action. CreateStateMachineAlias is an idempotent API. Step Functions bases the idempotency +check on the stateMachineArn, description, name, and routingConfiguration parameters. +Requests that contain the same values for these parameters return a successful idempotent +response without creating a duplicate resource. Related operations: +DescribeStateMachineAlias ListStateMachineAliases UpdateStateMachineAlias +DeleteStateMachineAlias + +# Arguments +- `name`: The name of the state machine alias. To avoid conflict with version ARNs, don't + use an integer in the name of the alias. +- `routing_configuration`: The routing configuration of a state machine alias. The routing + configuration shifts execution traffic between two state machine versions. + routingConfiguration contains an array of RoutingConfig objects that specify up to two + state machine versions. Step Functions then randomly choses which version to run an + execution with based on the weight assigned to each RoutingConfig. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description for the state machine alias. 
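As a rough illustration of how the new `publish`/`versionDescription` options combine with `create_state_machine_alias`, a hedged sketch follows. The module name, the role ARN, the `stateMachineVersionArn` response field, and the `RoutingConfig` key names are assumptions, not something this diff confirms.

```julia
using AWS
@service SFN  # assumed module name via AWS.jl's @service convention

definition = """{"StartAt":"Hello","States":{"Hello":{"Type":"Pass","End":true}}}"""
role_arn = "arn:aws:iam::111122223333:role/StepFunctionsRole"  # placeholder

# Create the state machine and publish version 1 in the same request.
sm = SFN.create_state_machine(definition, "my-state-machine", role_arn,
    Dict("publish" => true, "versionDescription" => "initial release"))

# Point an alias at the published version with 100% of the traffic.
# The response field and RoutingConfig keys below are assumed from the API shape.
SFN.create_state_machine_alias("PROD",
    [Dict("stateMachineVersionArn" => sm["stateMachineVersionArn"], "weight" => 100)])
```

To split traffic between two versions instead, the routing configuration would carry two entries whose weights sum to 100, per the docstring above.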
+""" +function create_state_machine_alias( + name, routingConfiguration; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "CreateStateMachineAlias", + Dict{String,Any}("name" => name, "routingConfiguration" => routingConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_state_machine_alias( + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "CreateStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "routingConfiguration" => routingConfiguration + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_activity(activity_arn) delete_activity(activity_arn, params::Dict{String,<:Any}) @@ -164,15 +237,19 @@ end delete_state_machine(state_machine_arn, params::Dict{String,<:Any}) Deletes a state machine. This is an asynchronous operation: It sets the state machine's -status to DELETING and begins the deletion process. If the given state machine Amazon -Resource Name (ARN) is a qualified state machine ARN, it will fail with -ValidationException. A qualified state machine ARN refers to a Distributed Map state -defined within a state machine. For example, the qualified state machine ARN -arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers -to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. For EXPRESS state machines, the deletion will happen eventually (usually -less than a minute). Running executions may emit logs after DeleteStateMachine API is -called. +status to DELETING and begins the deletion process. A qualified state machine ARN can +either refer to a Distributed Map state defined within a state machine, a version ARN, or +an alias ARN. The following are some examples of qualified and unqualified state machine +ARNs: The following qualified state machine ARN refers to a Distributed Map state with a +label mapStateLabel in a state machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following unqualified state machine ARN refers to a +state machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine This API action also +deletes all versions and aliases associated with a state machine. For EXPRESS state +machines, the deletion happens eventually (usually in less than a minute). Running +executions may emit logs after DeleteStateMachine API is called. # Arguments - `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to delete. @@ -205,6 +282,96 @@ function delete_state_machine( ) end +""" + delete_state_machine_alias(state_machine_alias_arn) + delete_state_machine_alias(state_machine_alias_arn, params::Dict{String,<:Any}) + +Deletes a state machine alias. After you delete a state machine alias, you can't use it to +start executions. When you delete a state machine alias, Step Functions doesn't delete the +state machine versions that alias references. Related operations: +CreateStateMachineAlias DescribeStateMachineAlias ListStateMachineAliases +UpdateStateMachineAlias + +# Arguments +- `state_machine_alias_arn`: The Amazon Resource Name (ARN) of the state machine alias to + delete. 
+ +""" +function delete_state_machine_alias( + stateMachineAliasArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "DeleteStateMachineAlias", + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_state_machine_alias( + stateMachineAliasArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "DeleteStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_state_machine_version(state_machine_version_arn) + delete_state_machine_version(state_machine_version_arn, params::Dict{String,<:Any}) + +Deletes a state machine version. After you delete a version, you can't call StartExecution +using that version's ARN or use the version with a state machine alias. Deleting a state +machine version won't terminate its in-progress executions. You can't delete a state +machine version currently referenced by one or more aliases. Before you delete a version, +you must either delete the aliases or update them to point to another state machine +version. Related operations: PublishStateMachineVersion ListStateMachineVersions + + +# Arguments +- `state_machine_version_arn`: The Amazon Resource Name (ARN) of the state machine version + to delete. + +""" +function delete_state_machine_version( + stateMachineVersionArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "DeleteStateMachineVersion", + Dict{String,Any}("stateMachineVersionArn" => stateMachineVersionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_state_machine_version( + stateMachineVersionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "DeleteStateMachineVersion", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineVersionArn" => stateMachineVersionArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_activity(activity_arn) describe_activity(activity_arn, params::Dict{String,<:Any}) @@ -243,12 +410,14 @@ end describe_execution(execution_arn) describe_execution(execution_arn, params::Dict{String,<:Any}) -Provides all information about a state machine execution, such as the state machine -associated with the execution, the execution input and output, and relevant execution -metadata. Use this API action to return the Map Run ARN if the execution was dispatched by -a Map Run. This operation is eventually consistent. The results are best effort and may -not reflect very recent updates and changes. This API action is not supported by EXPRESS -state machine executions unless they were dispatched by a Map Run. +Provides information about a state machine execution, such as the state machine associated +with the execution, the execution input and output, and relevant execution metadata. Use +this API action to return the Map Run Amazon Resource Name (ARN) if the execution was +dispatched by a Map Run. If you specify a version or alias ARN when you call the +StartExecution API action, DescribeExecution returns that ARN. This operation is +eventually consistent. The results are best effort and may not reflect very recent updates +and changes. 
Executions of an EXPRESS state machinearen't supported by DescribeExecution +unless a Map Run dispatched them. # Arguments - `execution_arn`: The Amazon Resource Name (ARN) of the execution to describe. @@ -316,17 +485,30 @@ end describe_state_machine(state_machine_arn, params::Dict{String,<:Any}) Provides information about a state machine's definition, its IAM role Amazon Resource Name -(ARN), and configuration. If the state machine ARN is a qualified state machine ARN, the -response returned includes the Map state's label. A qualified state machine ARN refers to a -Distributed Map state defined within a state machine. For example, the qualified state -machine ARN -arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers -to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. This operation is eventually consistent. The results are best effort and -may not reflect very recent updates and changes. +(ARN), and configuration. A qualified state machine ARN can either refer to a Distributed +Map state defined within a state machine, a version ARN, or an alias ARN. The following are +some examples of qualified and unqualified state machine ARNs: The following qualified +state machine ARN refers to a Distributed Map state with a label mapStateLabel in a state +machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following qualified state machine ARN refers to an +alias named PROD. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne:PROD> If you provide a qualified state machine ARN that refers to a version ARN or +an alias ARN, the request starts execution for that version or alias. The following +unqualified state machine ARN refers to a state machine named myStateMachine. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne> This API action returns the details for a state machine version if the +stateMachineArn you specify is a state machine version ARN. This operation is eventually +consistent. The results are best effort and may not reflect very recent updates and +changes. # Arguments -- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to describe. +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine for which you + want the information. If you specify a state machine version ARN, this API returns details + about that version. The version ARN is a combination of state machine ARN and the version + number separated by a colon (:). For example, stateMachineARN:1. """ function describe_state_machine( @@ -356,16 +538,57 @@ function describe_state_machine( ) end +""" + describe_state_machine_alias(state_machine_alias_arn) + describe_state_machine_alias(state_machine_alias_arn, params::Dict{String,<:Any}) + +Returns details about a state machine alias. Related operations: +CreateStateMachineAlias ListStateMachineAliases UpdateStateMachineAlias +DeleteStateMachineAlias + +# Arguments +- `state_machine_alias_arn`: The Amazon Resource Name (ARN) of the state machine alias. 
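To make the qualified-ARN wording concrete, a small sketch of describing a version and an alias through the assumed `@service` interface; both ARNs are placeholders built from the formats quoted above.

```julia
using AWS
@service SFN  # assumed module name

# Version ARN: the state machine ARN plus ":<version number>".
version_arn = "arn:aws:states:us-east-1:111122223333:stateMachine:my-state-machine:1"
println(SFN.describe_state_machine(version_arn))

# Alias ARN: the state machine ARN plus ":<alias name>".
alias_arn = "arn:aws:states:us-east-1:111122223333:stateMachine:my-state-machine:PROD"
println(SFN.describe_state_machine_alias(alias_arn))
```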
+ +""" +function describe_state_machine_alias( + stateMachineAliasArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "DescribeStateMachineAlias", + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_state_machine_alias( + stateMachineAliasArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "DescribeStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_state_machine_for_execution(execution_arn) describe_state_machine_for_execution(execution_arn, params::Dict{String,<:Any}) Provides information about a state machine's definition, its execution role ARN, and -configuration. If an execution was dispatched by a Map Run, the Map Run is returned in the -response. Additionally, the state machine returned will be the state machine associated -with the Map Run. This operation is eventually consistent. The results are best effort and -may not reflect very recent updates and changes. This API action is not supported by -EXPRESS state machines. +configuration. If a Map Run dispatched the execution, this action returns the Map Run +Amazon Resource Name (ARN) in the response. The state machine returned is the state machine +associated with the Map Run. This operation is eventually consistent. The results are best +effort and may not reflect very recent updates and changes. This API action is not +supported by EXPRESS state machines. # Arguments - `execution_arn`: The Amazon Resource Name (ARN) of the execution you want state machine @@ -540,14 +763,15 @@ end Lists all executions of a state machine or a Map Run. You can list all executions related to a state machine by specifying a state machine Amazon Resource Name (ARN), or those -related to a Map Run by specifying a Map Run ARN. Results are sorted by time, with the most -recent execution first. If nextToken is returned, there are more results available. The -value of nextToken is a unique pagination token for each page. Make the call again using -the returned token to retrieve the next page. Keep all other arguments unchanged. Each -pagination token expires after 24 hours. Using an expired pagination token will return an -HTTP 400 InvalidToken error. This operation is eventually consistent. The results are best -effort and may not reflect very recent updates and changes. This API action is not -supported by EXPRESS state machines. +related to a Map Run by specifying a Map Run ARN. You can also provide a state machine +alias ARN or version ARN to list the executions associated with a specific alias or +version. Results are sorted by time, with the most recent execution first. If nextToken is +returned, there are more results available. The value of nextToken is a unique pagination +token for each page. Make the call again using the returned token to retrieve the next +page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. +Using an expired pagination token will return an HTTP 400 InvalidToken error. This +operation is eventually consistent. The results are best effort and may not reflect very +recent updates and changes. This API action is not supported by EXPRESS state machines. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -566,7 +790,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. - `"stateMachineArn"`: The Amazon Resource Name (ARN) of the state machine whose executions - is listed. You can specify either a mapRunArn or a stateMachineArn, but not both. + is listed. You can specify either a mapRunArn or a stateMachineArn, but not both. You can + also return a list of executions associated with a specific alias or version, by specifying + an alias ARN or a version ARN in the stateMachineArn parameter. - `"statusFilter"`: If specified, only list the executions whose current execution status matches the given filter. """ @@ -628,6 +854,118 @@ function list_map_runs( ) end +""" + list_state_machine_aliases(state_machine_arn) + list_state_machine_aliases(state_machine_arn, params::Dict{String,<:Any}) + +Lists aliases for a specified state machine ARN. Results are sorted by time, with the most +recently created aliases listed first. To list aliases that reference a state machine +version, you can specify the version ARN in the stateMachineArn parameter. If nextToken is +returned, there are more results available. The value of nextToken is a unique pagination +token for each page. Make the call again using the returned token to retrieve the next +page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. +Using an expired pagination token will return an HTTP 400 InvalidToken error. Related +operations: CreateStateMachineAlias DescribeStateMachineAlias +UpdateStateMachineAlias DeleteStateMachineAlias + +# Arguments +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine for which you + want to list aliases. If you specify a state machine version ARN, this API returns a list + of aliases for that version. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. The default is 100 and the maximum allowed + page size is 1000. A value of 0 uses the default. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. 
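The `maxResults`/`nextToken` contract described above lends itself to a simple pagination loop. The sketch below assumes the `@service` module name and the `stateMachineAliases`/`nextToken` response field names, which this diff does not spell out.

```julia
using AWS
@service SFN  # assumed module name

function print_aliases(state_machine_arn)
    params = Dict{String,Any}("maxResults" => 10)
    while true
        page = SFN.list_state_machine_aliases(state_machine_arn, params)
        for alias in get(page, "stateMachineAliases", [])   # response key assumed
            println(alias["stateMachineAliasArn"])          # item key assumed
        end
        token = get(page, "nextToken", nothing)
        token === nothing && break
        params["nextToken"] = token   # tokens expire after 24 hours
    end
end

print_aliases("arn:aws:states:us-east-1:111122223333:stateMachine:my-state-machine")  # placeholder
```

The same token-passing pattern applies to `list_executions` (which now also accepts an alias or version ARN in `stateMachineArn`) and to `list_state_machine_versions`.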
+""" +function list_state_machine_aliases( + stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "ListStateMachineAliases", + Dict{String,Any}("stateMachineArn" => stateMachineArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_state_machine_aliases( + stateMachineArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "ListStateMachineAliases", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("stateMachineArn" => stateMachineArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_state_machine_versions(state_machine_arn) + list_state_machine_versions(state_machine_arn, params::Dict{String,<:Any}) + +Lists versions for the specified state machine Amazon Resource Name (ARN). The results are +sorted in descending order of the version creation time. If nextToken is returned, there +are more results available. The value of nextToken is a unique pagination token for each +page. Make the call again using the returned token to retrieve the next page. Keep all +other arguments unchanged. Each pagination token expires after 24 hours. Using an expired +pagination token will return an HTTP 400 InvalidToken error. Related operations: +PublishStateMachineVersion DeleteStateMachineVersion + +# Arguments +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. The default is 100 and the maximum allowed + page size is 1000. A value of 0 uses the default. This is only an upper limit. The actual + number of results returned per call might be fewer than the specified maximum. +- `"nextToken"`: If nextToken is returned, there are more results available. The value of + nextToken is a unique pagination token for each page. Make the call again using the + returned token to retrieve the next page. Keep all other arguments unchanged. Each + pagination token expires after 24 hours. Using an expired pagination token will return an + HTTP 400 InvalidToken error. +""" +function list_state_machine_versions( + stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "ListStateMachineVersions", + Dict{String,Any}("stateMachineArn" => stateMachineArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_state_machine_versions( + stateMachineArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "ListStateMachineVersions", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("stateMachineArn" => stateMachineArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_state_machines() list_state_machines(params::Dict{String,<:Any}) @@ -699,6 +1037,63 @@ function list_tags_for_resource( ) end +""" + publish_state_machine_version(state_machine_arn) + publish_state_machine_version(state_machine_arn, params::Dict{String,<:Any}) + +Creates a version from the current revision of a state machine. Use versions to create +immutable snapshots of your state machine. You can start executions from versions either +directly or with an alias. To create an alias, use CreateStateMachineAlias. 
You can publish +up to 1000 versions for each state machine. You must manually delete unused versions using +the DeleteStateMachineVersion API action. PublishStateMachineVersion is an idempotent API. +It doesn't create a duplicate state machine version if it already exists for the current +revision. Step Functions bases PublishStateMachineVersion's idempotency check on the +stateMachineArn, name, and revisionId parameters. Requests with the same parameters return +a successful idempotent response. If you don't specify a revisionId, Step Functions checks +for a previously published version of the state machine's current revision. Related +operations: DeleteStateMachineVersion ListStateMachineVersions + +# Arguments +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: An optional description of the state machine version. +- `"revisionId"`: Only publish the state machine version if the current state machine's + revision ID matches the specified ID. Use this option to avoid publishing a version if the + state machine changed since you last updated it. If the specified revision ID doesn't match + the state machine's current revision ID, the API returns ConflictException. To specify an + initial revision ID for a state machine with no revision ID assigned, specify the string + INITIAL for the revisionId parameter. For example, you can specify a revisionID of INITIAL + when you create a state machine using the CreateStateMachine API action. +""" +function publish_state_machine_version( + stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "PublishStateMachineVersion", + Dict{String,Any}("stateMachineArn" => stateMachineArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function publish_state_machine_version( + stateMachineArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "PublishStateMachineVersion", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("stateMachineArn" => stateMachineArn), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ send_task_failure(task_token) send_task_failure(task_token, params::Dict{String,<:Any}) @@ -832,21 +1227,49 @@ end start_execution(state_machine_arn) start_execution(state_machine_arn, params::Dict{String,<:Any}) -Starts a state machine execution. If the given state machine Amazon Resource Name (ARN) is -a qualified state machine ARN, it will fail with ValidationException. A qualified state -machine ARN refers to a Distributed Map state defined within a state machine. For example, -the qualified state machine ARN -arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers -to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. StartExecution is idempotent for STANDARD workflows. For a STANDARD -workflow, if StartExecution is called with the same name and input as a running execution, -the call will succeed and return the same response as the original request. If the -execution is closed or if the input is different, it will return a 400 -ExecutionAlreadyExists error. Names can be reused after 90 days. StartExecution is not +Starts a state machine execution. 
A qualified state machine ARN can either refer to a +Distributed Map state defined within a state machine, a version ARN, or an alias ARN. The +following are some examples of qualified and unqualified state machine ARNs: The +following qualified state machine ARN refers to a Distributed Map state with a label +mapStateLabel in a state machine named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following qualified state machine ARN refers to an +alias named PROD. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne:PROD> If you provide a qualified state machine ARN that refers to a version ARN or +an alias ARN, the request starts execution for that version or alias. The following +unqualified state machine ARN refers to a state machine named myStateMachine. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne> If you start an execution with an unqualified state machine ARN, Step Functions +uses the latest revision of the state machine for the execution. To start executions of a +state machine version, call StartExecution and provide the version ARN or the ARN of an +alias that points to the version. StartExecution is idempotent for STANDARD workflows. +For a STANDARD workflow, if you call StartExecution with the same name and input as a +running execution, the call succeeds and return the same response as the original request. +If the execution is closed or if the input is different, it returns a 400 +ExecutionAlreadyExists error. You can reuse names after 90 days. StartExecution isn't idempotent for EXPRESS workflows. # Arguments -- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to execute. +- `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine to execute. The + stateMachineArn parameter accepts one of the following inputs: An unqualified state + machine ARN – Refers to a state machine ARN that isn't qualified with a version or alias + ARN. The following is an example of an unqualified state machine ARN. + arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi + ne> Step Functions doesn't associate state machine executions that you start with an + unqualified ARN with a version. This is true even if that version uses the same revision + that the execution used. A state machine version ARN – Refers to a version ARN, which + is a combination of state machine ARN and the version number separated by a colon (:). The + following is an example of the ARN for version 10. + arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi + ne>:10 Step Functions doesn't associate executions that you start with a version ARN + with any aliases that point to that version. A state machine alias ARN – Refers to an + alias ARN, which is a combination of state machine ARN and the alias name separated by a + colon (:). The following is an example of the ARN for an alias named PROD. + arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi + ne:PROD> Step Functions associates executions that you start with an alias ARN with + that alias and the state machine version used for that execution. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -854,8 +1277,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys \"input\": \"{\"first_name\" : \"test\"}\" If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\" Length constraints apply to the payload size, and are expressed as bytes in UTF-8 encoding. -- `"name"`: The name of the execution. This name must be unique for your Amazon Web - Services account, region, and state machine for 90 days. For more information, see Limits +- `"name"`: Optional name of the execution. This name must be unique for your Amazon Web + Services account, Region, and state machine for 90 days. For more information, see Limits Related to State Machine Executions in the Step Functions Developer Guide. A name must not contain: white space brackets < > { } [ ] wildcard characters ? * special characters \" # % ^ | ~ ` & , ; : / control characters (U+0000-001F, U+007F-009F) @@ -1111,15 +1534,30 @@ end Updates an existing state machine by modifying its definition, roleArn, or loggingConfiguration. Running executions will continue to use the previous definition and roleArn. You must include at least one of definition or roleArn or you will receive a -MissingRequiredParameter error. If the given state machine Amazon Resource Name (ARN) is a -qualified state machine ARN, it will fail with ValidationException. A qualified state -machine ARN refers to a Distributed Map state defined within a state machine. For example, -the qualified state machine ARN +MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map +state defined within a state machine. For example, the qualified state machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named -stateMachineName. All StartExecution calls within a few seconds will use the updated -definition and roleArn. Executions started immediately after calling UpdateStateMachine may -use the previous state machine definition and roleArn. +stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state +defined within a state machine, a version ARN, or an alias ARN. The following are some +examples of qualified and unqualified state machine ARNs: The following qualified state +machine ARN refers to a Distributed Map state with a label mapStateLabel in a state machine +named myStateMachine. +arn:partition:states:region:account-id:stateMachine:myStateMachine/mapStateLabel If you +provide a qualified state machine ARN that refers to a Distributed Map state, the request +fails with ValidationException. The following qualified state machine ARN refers to an +alias named PROD. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne:PROD> If you provide a qualified state machine ARN that refers to a version ARN or +an alias ARN, the request starts execution for that version or alias. The following +unqualified state machine ARN refers to a state machine named myStateMachine. +arn:<partition>:states:<region>:<account-id>:stateMachine:<myStateMachi +ne> After you update your state machine, you can set the publish parameter to true in +the same action to publish a new version. This way, you can opt-in to strict versioning of +your state machine. Step Functions assigns monotonically increasing integers for state +machine versions, starting at version number 1. All StartExecution calls within a few +seconds use the updated definition and roleArn. 
Executions started immediately after you +call UpdateStateMachine may use the previous state machine definition and roleArn. # Arguments - `state_machine_arn`: The Amazon Resource Name (ARN) of the state machine. @@ -1128,10 +1566,14 @@ use the previous state machine definition and roleArn. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"definition"`: The Amazon States Language definition of the state machine. See Amazon States Language. -- `"loggingConfiguration"`: The LoggingConfiguration data type is used to set CloudWatch - Logs options. +- `"loggingConfiguration"`: Use the LoggingConfiguration data type to set CloudWatch Logs + options. +- `"publish"`: Specifies whether the state machine version is published. The default is + false. To publish a version after updating the state machine, set publish to true. - `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role of the state machine. - `"tracingConfiguration"`: Selects whether X-Ray tracing is enabled. +- `"versionDescription"`: An optional description of the state machine version to publish. + You can only specify the versionDescription parameter if you've set publish to true. """ function update_state_machine( stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1159,3 +1601,57 @@ function update_state_machine( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_state_machine_alias(state_machine_alias_arn) + update_state_machine_alias(state_machine_alias_arn, params::Dict{String,<:Any}) + +Updates the configuration of an existing state machine alias by modifying its description +or routingConfiguration. You must specify at least one of the description or +routingConfiguration parameters to update a state machine alias. UpdateStateMachineAlias +is an idempotent API. Step Functions bases the idempotency check on the +stateMachineAliasArn, description, and routingConfiguration parameters. Requests with the +same parameters return an idempotent response. This operation is eventually consistent. +All StartExecution requests made within a few seconds use the latest alias configuration. +Executions started immediately after calling UpdateStateMachineAlias may use the previous +routing configuration. Related operations: CreateStateMachineAlias +DescribeStateMachineAlias ListStateMachineAliases DeleteStateMachineAlias + +# Arguments +- `state_machine_alias_arn`: The Amazon Resource Name (ARN) of the state machine alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the state machine alias. +- `"routingConfiguration"`: The routing configuration of the state machine alias. An array + of RoutingConfig objects that specifies up to two state machine versions that the alias + starts executions for. 
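A hedged sketch of the publish-then-shift-traffic flow that these two operations enable. The module name, the ARNs, the `stateMachineVersionArn` response field, and the `RoutingConfig` key names are assumptions for illustration only.

```julia
using AWS
@service SFN  # assumed module name

sm_arn    = "arn:aws:states:us-east-1:111122223333:stateMachine:my-state-machine"       # placeholder
alias_arn = "arn:aws:states:us-east-1:111122223333:stateMachine:my-state-machine:PROD"  # placeholder

# Update the definition and publish the result as a new version in one call.
new_definition = """{"StartAt":"Hello","States":{"Hello":{"Type":"Pass","End":true}}}"""
resp = SFN.update_state_machine(sm_arn, Dict(
    "definition" => new_definition,
    "publish" => true,
    "versionDescription" => "canary candidate",
))

# Send 10% of new executions to the freshly published version, 90% to version 1.
SFN.update_state_machine_alias(alias_arn, Dict(
    "routingConfiguration" => [
        Dict("stateMachineVersionArn" => resp["stateMachineVersionArn"], "weight" => 10),  # field assumed
        Dict("stateMachineVersionArn" => "$(sm_arn):1", "weight" => 90),
    ],
))
```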
+""" +function update_state_machine_alias( + stateMachineAliasArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return sfn( + "UpdateStateMachineAlias", + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_state_machine_alias( + stateMachineAliasArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sfn( + "UpdateStateMachineAlias", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("stateMachineAliasArn" => stateMachineAliasArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/snowball.jl b/src/services/snowball.jl index 94eea5ec34..d434b519af 100644 --- a/src/services/snowball.jl +++ b/src/services/snowball.jl @@ -244,14 +244,14 @@ Description: Snowcone Device type: EDGE_S Capacity: T98 Description: Edge Storage Optimized for data transfer only Device type: EDGE_CG Capacity: T42 Description: Snowball Edge Compute Optimized with GPU Device type: EDGE_C Capacity: T42 Description: Snowball Edge Compute Optimized without GPU Device type: EDGE -Capacity: T100 Description: Snowball Edge Storage Optimized with EC2 Compute Device -type: STANDARD Capacity: T50 Description: Original Snowball device This device is -only available in the Ningxia, Beijing, and Singapore Amazon Web Services Region -Device type: STANDARD Capacity: T80 Description: Original Snowball device This device -is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Region. -Device type: V3_5C Capacity: T32 Description: Snowball Edge Compute Optimized without -GPU Device type: V3_5S Capacity: T240 Description: Snowball Edge Storage -Optimized 210TB +Capacity: T100 Description: Snowball Edge Storage Optimized with EC2 Compute This +device is replaced with T98. Device type: STANDARD Capacity: T50 Description: +Original Snowball device This device is only available in the Ningxia, Beijing, and +Singapore Amazon Web Services Region Device type: STANDARD Capacity: T80 +Description: Original Snowball device This device is only available in the Ningxia, +Beijing, and Singapore Amazon Web Services Region. Snow Family device type: +RACK_5U_C Capacity: T13 Description: Snowblade. Device type: V3_5S Capacity: +T240 Description: Snowball Edge Storage Optimized 210TB # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -269,6 +269,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys (Snow Family Devices and Capacity) in the Snowcone User Guide. - `"ForwardingAddressId"`: The forwarding address ID for a job. This field is not supported in most Regions. +- `"ImpactLevel"`: The highest impact level of data that will be stored or processed on the + device, provided at job creation. - `"JobType"`: Defines the type of job that you're creating. - `"KmsKeyARN"`: The KmsKeyARN that you want to associate with this job. KmsKeyARNs are created using the CreateKey Key Management Service (KMS) API action. @@ -279,10 +281,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys device that your transferred data will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File System) and the Amazon Web Services Storage Gateway service Tape Gateway type. +- `"PickupDetails"`: Information identifying the person picking up the device. 
- `"RemoteManagement"`: Allows you to securely operate and manage Snowcone devices remotely from outside of your internal network. When set to INSTALLED_AUTOSTART, remote management will automatically be available when the device arrives at your location. Otherwise, you - need to use the Snowball Client to manage the device. + need to use the Snowball Edge client to manage the device. When set to NOT_INSTALLED, + remote management will not be available on the device. - `"Resources"`: Defines the Amazon S3 buckets associated with this job. With IMPORT jobs, you specify the bucket or buckets that your transferred data will be imported into. With EXPORT jobs, you specify the bucket or buckets that your transferred data will be exported @@ -330,8 +334,8 @@ function create_job( end """ - create_long_term_pricing(long_term_pricing_type) - create_long_term_pricing(long_term_pricing_type, params::Dict{String,<:Any}) + create_long_term_pricing(long_term_pricing_type, snowball_type) + create_long_term_pricing(long_term_pricing_type, snowball_type, params::Dict{String,<:Any}) Creates a job with the long-term usage option for a device. The long-term usage is a 1-year or 3-year long-term pricing type for the device. You are billed upfront, and Amazon Web @@ -340,25 +344,28 @@ Services provides discounts for long-term pricing. # Arguments - `long_term_pricing_type`: The type of long-term pricing option you want for the device, either 1-year or 3-year long-term pricing. +- `snowball_type`: The type of Snow Family devices to use for the long-term pricing job. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IsLongTermPricingAutoRenew"`: Specifies whether the current long-term pricing type for the device should be renewed. -- `"SnowballType"`: The type of Snow Family devices to use for the long-term pricing job. """ function create_long_term_pricing( - LongTermPricingType; aws_config::AbstractAWSConfig=global_aws_config() + LongTermPricingType, SnowballType; aws_config::AbstractAWSConfig=global_aws_config() ) return snowball( "CreateLongTermPricing", - Dict{String,Any}("LongTermPricingType" => LongTermPricingType); + Dict{String,Any}( + "LongTermPricingType" => LongTermPricingType, "SnowballType" => SnowballType + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_long_term_pricing( LongTermPricingType, + SnowballType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -367,7 +374,10 @@ function create_long_term_pricing( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}("LongTermPricingType" => LongTermPricingType), + Dict{String,Any}( + "LongTermPricingType" => LongTermPricingType, + "SnowballType" => SnowballType, + ), params, ), ); @@ -778,10 +788,10 @@ end list_compatible_images() list_compatible_images(params::Dict{String,<:Any}) -This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs) that -are owned by your Amazon Web Services accountthat would be supported for use on a Snow -device. Currently, supported AMIs are based on the Amazon Linux-2, Ubuntu 20.04 LTS - -Focal, or Ubuntu 22.04 LTS - Jammy images, available on the Amazon Web Services +This action returns a list of the different Amazon EC2-compatible Amazon Machine Images +(AMIs) that are owned by your Amazon Web Services accountthat would be supported for use on +a Snow device. 
Currently, supported AMIs are based on the Amazon Linux-2, Ubuntu 20.04 LTS +- Focal, or Ubuntu 22.04 LTS - Jammy images, available on the Amazon Web Services Marketplace. Ubuntu 16.04 LTS - Xenial (HVM) images are no longer supported in the Market, but still supported for use on devices through Amazon EC2 VM Import/Export and running locally in AMIs. @@ -865,6 +875,35 @@ function list_long_term_pricing( ) end +""" + list_pickup_locations() + list_pickup_locations(params::Dict{String,<:Any}) + +A list of locations from which the customer can choose to pickup a device. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of locations to list per page. +- `"NextToken"`: HTTP requests are stateless. To identify what object comes \"next\" in the + list of ListPickupLocationsRequest objects, you have the option of specifying NextToken as + the starting point for your returned list. +""" +function list_pickup_locations(; aws_config::AbstractAWSConfig=global_aws_config()) + return snowball( + "ListPickupLocations"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_pickup_locations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return snowball( + "ListPickupLocations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_service_versions(service_name) list_service_versions(service_name, params::Dict{String,<:Any}) @@ -985,6 +1024,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys device that your transferred data will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File System) and the Amazon Web Services Storage Gateway service Tape Gateway type. +- `"PickupDetails"`: - `"Resources"`: The updated JobResource object, or the updated JobResource object. - `"RoleARN"`: The new role Amazon Resource Name (ARN) that you want to associate with this job. To create a role ARN, use the CreateRoleIdentity and Access Management (IAM) API diff --git a/src/services/sqs.jl b/src/services/sqs.jl index 5fd3398d9b..cbf03dd63c 100644 --- a/src/services/sqs.jl +++ b/src/services/sqs.jl @@ -100,10 +100,15 @@ end cancel_message_move_task(task_handle) cancel_message_move_task(task_handle, params::Dict{String,<:Any}) -Cancels a specified message movement task. A message movement can only be cancelled when -the current status is RUNNING. Cancelling a message movement task does not revert the +Cancels a specified message movement task. A message movement can only be cancelled when +the current status is RUNNING. Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been -moved yet. +moved yet. This action is currently limited to supporting message redrive from +dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue +(DLQ), while the destination queue can be the original source queue (from which the +messages were driven to the dead-letter-queue), or a custom destination queue. +Currently, only standard queues are supported. Only one active message movement task is +supported per queue at any given time. # Arguments - `task_handle`: An identifier associated with a message movement task. 
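Taken together, the message-move operations amended in this file support a small DLQ redrive workflow; `start_message_move_task` and `list_message_move_tasks` are documented further below. The sketch assumes the `@service` module name, a placeholder DLQ ARN, and a placeholder task handle (the real handle comes from the start call's response, whose field name this diff doesn't show).

```julia
using AWS
@service SQS  # assumed module name via AWS.jl's @service convention

# Placeholder ARN; the source must be a dead-letter queue of another SQS queue.
dlq_arn = "arn:aws:sqs:us-east-1:111122223333:my-queue-dlq"

# Start a redrive from the DLQ back to its original source queue.
SQS.start_message_move_task(dlq_arn)

# Inspect the most recent move tasks (up to 10) for this DLQ.
SQS.list_message_move_tasks(dlq_arn)

# While a task's status is RUNNING it can be cancelled with the task handle
# returned by the start call (placeholder value shown here).
SQS.cancel_message_move_task("AQEB...example-task-handle")
```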
@@ -807,6 +812,12 @@ end list_message_move_tasks(source_arn, params::Dict{String,<:Any}) Gets the most recent message movement tasks (up to 10) under a specific source queue. +This action is currently limited to supporting message redrive from dead-letter queues +(DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the +destination queue can be the original source queue (from which the messages were driven to +the dead-letter-queue), or a custom destination queue. Currently, only standard queues +are supported. Only one active message movement task is supported per queue at any given +time. # Arguments - `source_arn`: The ARN of the queue whose message movement tasks are to be listed. @@ -914,12 +925,12 @@ end purge_queue(queue_url) purge_queue(queue_url, params::Dict{String,<:Any}) -Deletes the messages in a queue specified by the QueueURL parameter. When you use the -PurgeQueue action, you can't retrieve any messages deleted from a queue. The message -deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of -your queue's size. Messages sent to the queue before you call PurgeQueue might be -received but are deleted within the next minute. Messages sent to the queue after you call -PurgeQueue might be deleted while the queue is being purged. +Deletes available messages in a queue (including in-flight messages) specified by the +QueueURL parameter. When you use the PurgeQueue action, you can't retrieve any messages +deleted from a queue. The message deletion process takes up to 60 seconds. We recommend +waiting for 60 seconds regardless of your queue's size. Messages sent to the queue before +you call PurgeQueue might be received but are deleted within the next minute. Messages sent +to the queue after you call PurgeQueue might be deleted while the queue is being purged. # Arguments - `queue_url`: The URL of the queue from which the PurgeQueue action deletes messages. @@ -1432,15 +1443,20 @@ end Starts an asynchronous task to move messages from a specified source queue to a specified destination queue. This action is currently limited to supporting message redrive from -dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue -(DLQ), while the destination queue can be the original source queue (from which the -messages were driven to the dead-letter-queue), or a custom destination queue. -Currently, only standard queues are supported. Only one active message movement task is -supported per queue at any given time. +queues that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. +Non-SQS queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are +currently not supported. In dead-letter queues redrive context, the StartMessageMoveTask +the source queue is the DLQ, while the destination queue can be the original source queue +(from which the messages were driven to the dead-letter-queue), or a custom destination +queue. Currently, only standard queues support redrive. FIFO queues don't support +redrive. Only one active message movement task is supported per queue at any given time. + # Arguments - `source_arn`: The ARN of the queue that contains the messages to be moved to another - queue. Currently, only dead-letter queue (DLQ) ARNs are accepted. + queue. Currently, only ARNs of dead-letter queues (DLQs) whose sources are other Amazon SQS + queues are accepted. 
DLQs whose sources are non-SQS queues, such as Lambda or Amazon SNS + topics, are not currently supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/ssm.jl b/src/services/ssm.jl index 6b98ca6297..5311e44a48 100644 --- a/src/services/ssm.jl +++ b/src/services/ssm.jl @@ -33,7 +33,8 @@ EC2) instances, see Tagging your Amazon EC2 resources in the Amazon EC2 User Gui OpsMetadata object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager. For the Document and - Parameter values, use the name of the resource. ManagedInstance: mi-012345abcde The + Parameter values, use the name of the resource. If you're tagging a shared document, you + must use the full ARN of the document. ManagedInstance: mi-012345abcde The ManagedInstance type for this API operation is only for on-premises managed nodes. You must specify the name of the managed node in the following format: mi-ID_number . For example, mi-1a2b3c4d5e6f. @@ -651,12 +652,12 @@ end create_ops_item(description, source, title, params::Dict{String,<:Any}) Creates a new OpsItem. You must have permission in Identity and Access Management (IAM) to -create a new OpsItem. For more information, see Getting started with OpsCenter in the -Amazon Web Services Systems Manager User Guide. Operations engineers and IT professionals -use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate -operational issues impacting the performance and health of their Amazon Web Services -resources. For more information, see Amazon Web Services Systems Manager OpsCenter in the -Amazon Web Services Systems Manager User Guide. +create a new OpsItem. For more information, see Set up OpsCenter in the Amazon Web Services +Systems Manager User Guide. Operations engineers and IT professionals use Amazon Web +Services Systems Manager OpsCenter to view, investigate, and remediate operational issues +impacting the performance and health of their Amazon Web Services resources. For more +information, see Amazon Web Services Systems Manager OpsCenter in the Amazon Web Services +Systems Manager User Guide. # Arguments - `description`: Information about the OpsItem. @@ -669,8 +670,8 @@ Amazon Web Services Systems Manager User Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AccountId"`: The target Amazon Web Services account where you want to create an OpsItem. To make this call, your account must be configured to work with OpsItems across - accounts. For more information, see Setting up OpsCenter to work with OpsItems across - accounts in the Amazon Web Services Systems Manager User Guide. + accounts. For more information, see Set up OpsCenter in the Amazon Web Services Systems + Manager User Guide. - `"ActualEndTime"`: The time a runbook workflow ended. Currently reported only for the OpsItem type /aws/changerequest. - `"ActualStartTime"`: The time a runbook workflow started. Currently reported only for the @@ -706,13 +707,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys OpsItems. For example, related OpsItems can include OpsItems with similar error messages, impacted resources, or statuses for the impacted resource. - `"Severity"`: Specify a severity to assign to an OpsItem. -- `"Tags"`: Optional metadata that you assign to a resource. 
You can restrict access to - OpsItems by using an inline IAM policy that specifies tags. For more information, see - Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide. Tags - use a key-value pair. For example: Key=Department,Value=Finance To add tags to a new - OpsItem, a user must have IAM permissions for both the ssm:CreateOpsItems operation and the - ssm:AddTagsToResource operation. To add tags to an existing OpsItem, use the - AddTagsToResource operation. +- `"Tags"`: Optional metadata that you assign to a resource. Tags use a key-value pair. For + example: Key=Department,Value=Finance To add tags to a new OpsItem, a user must have IAM + permissions for both the ssm:CreateOpsItems operation and the ssm:AddTagsToResource + operation. To add tags to an existing OpsItem, use the AddTagsToResource operation. """ function create_ops_item( Description, Source, Title; aws_config::AbstractAWSConfig=global_aws_config() @@ -2028,26 +2026,29 @@ end describe_instance_information() describe_instance_information(params::Dict{String,<:Any}) -Describes one or more of your managed nodes, including information about the operating -system platform, the version of SSM Agent installed on the managed node, node status, and -so on. If you specify one or more managed node IDs, it returns information for those -managed nodes. If you don't specify node IDs, it returns information for all your managed -nodes. If you specify a node ID that isn't valid or a node that you don't own, you receive -an error. The IamRole field for this API operation is the Identity and Access Management -(IAM) role assigned to on-premises managed nodes. This call doesn't return the IAM role for -EC2 instances. +Provides information about one or more of your managed nodes, including the operating +system platform, SSM Agent version, association status, and IP address. This operation does +not return information for nodes that are either Stopped or Terminated. If you specify one +or more node IDs, the operation returns information for those managed nodes. If you don't +specify node IDs, it returns information for all your managed nodes. If you specify a node +ID that isn't valid or a node that you don't own, you receive an error. The IamRole field +returned for this API operation is the Identity and Access Management (IAM) role assigned +to on-premises managed nodes. This operation does not return the IAM role for EC2 +instances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Filters"`: One or more filters. Use a filter to return a more specific list of managed - nodes. You can filter based on tags applied to your managed nodes. Use this Filters data - type instead of InstanceInformationFilterList, which is deprecated. + nodes. You can filter based on tags applied to your managed nodes. Tag filters can't be + combined with other filter types. Use this Filters data type instead of + InstanceInformationFilterList, which is deprecated. - `"InstanceInformationFilterList"`: This is a legacy method. We recommend that you don't use this method. Instead, use the Filters data type. Filters enables you to return node information by filtering based on tags applied to managed nodes. Attempting to use InstanceInformationFilterList and Filters leads to an exception error. - `"MaxResults"`: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results. 
+ The default value is 10 items. - `"NextToken"`: The token for the next set of items to return. (You received this token from a previous call.) """ @@ -2594,12 +2595,11 @@ end describe_ops_items(params::Dict{String,<:Any}) Query a set of OpsItems. You must have permission in Identity and Access Management (IAM) -to query a list of OpsItems. For more information, see Getting started with OpsCenter in -the Amazon Web Services Systems Manager User Guide. Operations engineers and IT -professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and -remediate operational issues impacting the performance and health of their Amazon Web -Services resources. For more information, see OpsCenter in the Amazon Web Services Systems -Manager User Guide. +to query a list of OpsItems. For more information, see Set up OpsCenter in the Amazon Web +Services Systems Manager User Guide. Operations engineers and IT professionals use Amazon +Web Services Systems Manager OpsCenter to view, investigate, and remediate operational +issues impacting the performance and health of their Amazon Web Services resources. For +more information, see OpsCenter in the Amazon Web Services Systems Manager User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2613,11 +2613,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Operations: Equals,Contains Key: OperationalData** Operations: Equals Key: OperationalDataKey Operations: Equals Key: OperationalDataValue Operations: Equals, Contains Key: OpsItemId Operations: Equals Key: ResourceId Operations: Contains Key: - AutomationId Operations: Equals *The Equals operator for Title matches the first 100 - characters. If you specify more than 100 characters, they system returns an error that the - filter value exceeds the length limit. **If you filter the response by using the - OperationalData operator, specify a key-value pair by using the following JSON format: - {\"key\":\"key_name\",\"value\":\"a_value\"} + AutomationId Operations: Equals Key: AccountId Operations: Equals *The Equals operator + for Title matches the first 100 characters. If you specify more than 100 characters, they + system returns an error that the filter value exceeds the length limit. **If you filter the + response by using the OperationalData operator, specify a key-value pair by using the + following JSON format: {\"key\":\"key_name\",\"value\":\"a_value\"} """ function describe_ops_items(; aws_config::AbstractAWSConfig=global_aws_config()) return ssm("DescribeOpsItems"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) @@ -3491,12 +3491,12 @@ end get_ops_item(ops_item_id, params::Dict{String,<:Any}) Get information about an OpsItem by using the ID. You must have permission in Identity and -Access Management (IAM) to view information about an OpsItem. For more information, see -Getting started with OpsCenter in the Amazon Web Services Systems Manager User Guide. -Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter -to view, investigate, and remediate operational issues impacting the performance and health -of their Amazon Web Services resources. For more information, see OpsCenter in the Amazon -Web Services Systems Manager User Guide. +Access Management (IAM) to view information about an OpsItem. For more information, see Set +up OpsCenter in the Amazon Web Services Systems Manager User Guide. 
Operations engineers +and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, +investigate, and remediate operational issues impacting the performance and health of their +Amazon Web Services resources. For more information, see OpsCenter in the Amazon Web +Services Systems Manager User Guide. # Arguments - `ops_item_id`: The ID of the OpsItem that you want to get. @@ -6550,12 +6550,11 @@ end update_ops_item(ops_item_id, params::Dict{String,<:Any}) Edit or change an OpsItem. You must have permission in Identity and Access Management (IAM) -to update an OpsItem. For more information, see Getting started with OpsCenter in the -Amazon Web Services Systems Manager User Guide. Operations engineers and IT professionals -use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate -operational issues impacting the performance and health of their Amazon Web Services -resources. For more information, see OpsCenter in the Amazon Web Services Systems Manager -User Guide. +to update an OpsItem. For more information, see Set up OpsCenter in the Amazon Web Services +Systems Manager User Guide. Operations engineers and IT professionals use Amazon Web +Services Systems Manager OpsCenter to view, investigate, and remediate operational issues +impacting the performance and health of their Amazon Web Services resources. For more +information, see OpsCenter in the Amazon Web Services Systems Manager User Guide. # Arguments - `ops_item_id`: The ID of the OpsItem. diff --git a/src/services/ssm_sap.jl b/src/services/ssm_sap.jl index 150162eb25..9d3a234de1 100644 --- a/src/services/ssm_sap.jl +++ b/src/services/ssm_sap.jl @@ -559,6 +559,43 @@ function register_application( ) end +""" + start_application_refresh(application_id) + start_application_refresh(application_id, params::Dict{String,<:Any}) + +Refreshes a registered application. + +# Arguments +- `application_id`: The ID of the application. + +""" +function start_application_refresh( + ApplicationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_sap( + "POST", + "/start-application-refresh", + Dict{String,Any}("ApplicationId" => ApplicationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_application_refresh( + ApplicationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_sap( + "POST", + "/start-application-refresh", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ApplicationId" => ApplicationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -643,6 +680,7 @@ Updates the settings of an application registered with AWS Systems Manager for S # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Backint"`: Installation of AWS Backint Agent for SAP HANA. - `"CredentialsToAddOrUpdate"`: The credentials to be added or updated. - `"CredentialsToRemove"`: The credentials to be removed. """ diff --git a/src/services/sts.jl b/src/services/sts.jl index ddf5c0f02b..5cb6ac557a 100644 --- a/src/services/sts.jl +++ b/src/services/sts.jl @@ -143,6 +143,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. 
For more information, see Session Policies in the IAM User Guide. +- `"ProvidedContexts"`: Reserved for future use. - `"SerialNumber"`: The identification number of the MFA device that is associated with the user who is making the AssumeRole call. Specify this value if the trust policy of the role being assumed includes a condition that requires MFA authentication. The value is either @@ -500,7 +501,8 @@ use web identity federation to get access to content in Amazon S3. - `web_identity_token`: The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application - makes an AssumeRoleWithWebIdentity call. + makes an AssumeRoleWithWebIdentity call. Only tokens with RSA algorithms (RS256) are + supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/transcribe.jl b/src/services/transcribe.jl index cbf0b09a64..76b38ea110 100644 --- a/src/services/transcribe.jl +++ b/src/services/transcribe.jl @@ -1847,6 +1847,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Tags"`: Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you start this new job. To learn more about using tags with Amazon Transcribe, refer to Tagging resources. +- `"ToxicityDetection"`: Enables toxic speech detection in your transcript. If you include + ToxicityDetection in your request, you must also include ToxicityCategories. For + information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic + speech. """ function start_transcription_job( Media, TranscriptionJobName; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/transfer.jl b/src/services/transfer.jl index b2d3226404..2be6cec99a 100644 --- a/src/services/transfer.jl +++ b/src/services/transfer.jl @@ -120,7 +120,11 @@ the AS2 process is identified with the LocalProfileId. the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. + to send with StartFileTransfer. If you are using Basic authentication for your AS2 + connector, the access role requires the secretsmanager:GetSecretValue permission for the + secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web + Services managed key in Secrets Manager, then the role also needs the kms:Decrypt + permission for that key. - `base_directory`: The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /DOC-EXAMPLE-BUCKET/home/mydirectory. - `local_profile_id`: A unique identifier for the AS2 local profile. @@ -185,12 +189,13 @@ function create_agreement( end """ - create_connector(access_role, as2_config, url) - create_connector(access_role, as2_config, url, params::Dict{String,<:Any}) + create_connector(access_role, url) + create_connector(access_role, url, params::Dict{String,<:Any}) Creates the connector, which captures the parameters for an outbound connection for the AS2 -protocol. The connector is required for sending files to an externally hosted AS2 server. 
-For more details about connectors, see Create AS2 connectors. +or SFTP protocol. The connector is required for sending files to an externally hosted AS2 +or SFTP server. For more details about AS2 connectors, see Create AS2 connectors. You must +specify exactly one configuration object: either for AS2 (As2Config) or SFTP (SftpConfig). # Arguments - `access_role`: With AS2, you can send files by calling StartFileTransfer and specifying @@ -201,33 +206,35 @@ For more details about connectors, see Create AS2 connectors. the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. -- `as2_config`: A structure that contains the parameters for a connector object. -- `url`: The URL of the partner's AS2 endpoint. + to send with StartFileTransfer. If you are using Basic authentication for your AS2 + connector, the access role requires the secretsmanager:GetSecretValue permission for the + secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web + Services managed key in Secrets Manager, then the role also needs the kms:Decrypt + permission for that key. +- `url`: The URL of the partner's AS2 or SFTP endpoint. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"As2Config"`: A structure that contains the parameters for an AS2 connector object. - `"LoggingRole"`: The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs. +- `"SftpConfig"`: A structure that contains the parameters for an SFTP connector object. - `"Tags"`: Key-value pairs that can be used to group and search for connectors. Tags are metadata attached to connectors for any purpose. """ function create_connector( - AccessRole, As2Config, Url; aws_config::AbstractAWSConfig=global_aws_config() + AccessRole, Url; aws_config::AbstractAWSConfig=global_aws_config() ) return transfer( "CreateConnector", - Dict{String,Any}( - "AccessRole" => AccessRole, "As2Config" => As2Config, "Url" => Url - ); + Dict{String,Any}("AccessRole" => AccessRole, "Url" => Url); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_connector( AccessRole, - As2Config, Url, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -236,11 +243,7 @@ function create_connector( "CreateConnector", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "AccessRole" => AccessRole, "As2Config" => As2Config, "Url" => Url - ), - params, + _merge, Dict{String,Any}("AccessRole" => AccessRole, "Url" => Url), params ), ); aws_config=aws_config, @@ -417,6 +420,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys EndpointType must be VPC, and domain must be Amazon S3. - `"SecurityPolicyName"`: Specifies the name of the security policy that is attached to the server. +- `"StructuredLogDestinations"`: Specifies the log groups to which your server logs are + sent. To specify a log group, you must provide the ARN for an existing log group. 
In this + case, the format of the log group is as follows: + arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* For example, + arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* If you have previously + specified a log group for a server, you can clear it, and in effect turn off structured + logging, by providing an empty value for this parameter in an update-server call. For + example: update-server --server-id s-1234567890abcdef0 --structured-log-destinations - `"Tags"`: Key-value pairs that can be used to group and search for servers. - `"WorkflowDetails"`: Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow. In addition to a workflow to execute @@ -474,8 +485,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ] In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target - to the HomeDirectory parameter value. The following is an Entry and Target pair example for - chroot. [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ] + to the value the user should see for their home directory when they log in. The following + is an Entry and Target pair example for chroot. [ { \"Entry\": \"/\", \"Target\": + \"/bucket_name/home/mydirectory\" } ] - `"HomeDirectoryType"`: The type of landing directory (folder) that you want your users' home directory to be when they log in to the server. If you set it to PATH, the user will see the absolute Amazon S3 bucket or EFS paths as is in their file transfer protocol @@ -716,7 +728,7 @@ end delete_connector(connector_id) delete_connector(connector_id, params::Dict{String,<:Any}) -Deletes the agreement that's specified in the provided ConnectorId. +Deletes the connector that's specified in the provided ConnectorId. # Arguments - `connector_id`: The unique identifier for the connector. @@ -2055,44 +2067,54 @@ function send_workflow_step_state( end """ - start_file_transfer(connector_id, send_file_paths) - start_file_transfer(connector_id, send_file_paths, params::Dict{String,<:Any}) + start_file_transfer(connector_id) + start_file_transfer(connector_id, params::Dict{String,<:Any}) -Begins an outbound file transfer to a remote AS2 server. You specify the ConnectorId and -the file paths for where to send the files. +Begins a file transfer between local Amazon Web Services storage and a remote AS2 or SFTP +server. For an AS2 connector, you specify the ConnectorId and one or more SendFilePaths +to identify the files you want to transfer. For an SFTP connector, the file transfer can +be either outbound or inbound. In both cases, you specify the ConnectorId. Depending on the +direction of the transfer, you also specify the following items: If you are transferring +files from a partner's SFTP server to a Transfer Family server, you specify one or more +RetrieveFilePaths to identify the files you want to transfer, and a LocalDirectoryPath to +specify the destination folder. If you are transferring files to a partner's SFTP server +from Amazon Web Services storage, you specify one or more SendFilePaths to identify the +files you want to transfer, and a RemoteDirectoryPath to specify the destination folder. # Arguments - `connector_id`: The unique identifier for the connector. -- `send_file_paths`: An array of strings.
Each string represents the absolute path for one - outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt . -""" -function start_file_transfer( - ConnectorId, SendFilePaths; aws_config::AbstractAWSConfig=global_aws_config() -) +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LocalDirectoryPath"`: For an inbound transfer, the LocaDirectoryPath specifies the + destination for one or more files that are transferred from the partner's SFTP server. +- `"RemoteDirectoryPath"`: For an outbound transfer, the RemoteDirectoryPath specifies the + destination for one or more files that are transferred to the partner's SFTP server. If you + don't specify a RemoteDirectoryPath, the destination for transferred files is the SFTP + user's home directory. +- `"RetrieveFilePaths"`: One or more source paths for the partner's SFTP server. Each + string represents a source file path for one inbound file transfer. +- `"SendFilePaths"`: One or more source paths for the Transfer Family server. Each string + represents a source file path for one outbound file transfer. For example, + DOC-EXAMPLE-BUCKET/myfile.txt . +""" +function start_file_transfer(ConnectorId; aws_config::AbstractAWSConfig=global_aws_config()) return transfer( "StartFileTransfer", - Dict{String,Any}("ConnectorId" => ConnectorId, "SendFilePaths" => SendFilePaths); + Dict{String,Any}("ConnectorId" => ConnectorId); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function start_file_transfer( ConnectorId, - SendFilePaths, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return transfer( "StartFileTransfer", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "ConnectorId" => ConnectorId, "SendFilePaths" => SendFilePaths - ), - params, - ), + mergewith(_merge, Dict{String,Any}("ConnectorId" => ConnectorId), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2216,6 +2238,41 @@ function tag_resource( ) end +""" + test_connection(connector_id) + test_connection(connector_id, params::Dict{String,<:Any}) + +Tests whether your SFTP connector is set up successfully. We highly recommend that you call +this operation to test your ability to transfer files between a Transfer Family server and +a trading partner's SFTP server. + +# Arguments +- `connector_id`: The unique identifier for the connector. + +""" +function test_connection(ConnectorId; aws_config::AbstractAWSConfig=global_aws_config()) + return transfer( + "TestConnection", + Dict{String,Any}("ConnectorId" => ConnectorId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function test_connection( + ConnectorId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return transfer( + "TestConnection", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ConnectorId" => ConnectorId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ test_identity_provider(server_id, user_name) test_identity_provider(server_id, user_name, params::Dict{String,<:Any}) @@ -2436,7 +2493,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. 
+ to send with StartFileTransfer. If you are using Basic authentication for your AS2 + connector, the access role requires the secretsmanager:GetSecretValue permission for the + secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web + Services managed key in Secrets Manager, then the role also needs the kms:Decrypt + permission for that key. - `"BaseDirectory"`: To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /DOC-EXAMPLE-BUCKET/home/mydirectory . @@ -2539,12 +2600,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend - to send with StartFileTransfer. -- `"As2Config"`: A structure that contains the parameters for a connector object. + to send with StartFileTransfer. If you are using Basic authentication for your AS2 + connector, the access role requires the secretsmanager:GetSecretValue permission for the + secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web + Services managed key in Secrets Manager, then the role also needs the kms:Decrypt + permission for that key. +- `"As2Config"`: A structure that contains the parameters for an AS2 connector object. - `"LoggingRole"`: The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs. -- `"Url"`: The URL of the partner's AS2 endpoint. +- `"SftpConfig"`: A structure that contains the parameters for an SFTP connector object. +- `"Url"`: The URL of the partner's AS2 or SFTP endpoint. """ function update_connector(ConnectorId; aws_config::AbstractAWSConfig=global_aws_config()) return transfer( @@ -2759,6 +2825,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys EndpointType must be VPC, and domain must be Amazon S3. - `"SecurityPolicyName"`: Specifies the name of the security policy that is attached to the server. +- `"StructuredLogDestinations"`: Specifies the log groups to which your server logs are + sent. To specify a log group, you must provide the ARN for an existing log group. In this + case, the format of the log group is as follows: + arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* For example, + arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* If you have previously + specified a log group for a server, you can clear it, and in effect turn off structured + logging, by providing an empty value for this parameter in an update-server call. For + example: update-server --server-id s-1234567890abcdef0 --structured-log-destinations - `"WorkflowDetails"`: Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow. 
In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and diff --git a/src/services/verifiedpermissions.jl b/src/services/verifiedpermissions.jl index af1304036f..ad9561fbc7 100644 --- a/src/services/verifiedpermissions.jl +++ b/src/services/verifiedpermissions.jl @@ -161,7 +161,9 @@ end create_policy_store(validation_settings) create_policy_store(validation_settings, params::Dict{String,<:Any}) -Creates a policy store. A policy store is a container for policy resources. +Creates a policy store. A policy store is a container for policy resources. Although Cedar +supports multiple namespaces, Verified Permissions currently supports only one namespace +per policy store. # Arguments - `validation_settings`: Specifies the validation setting for this policy store. Currently, @@ -682,8 +684,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys principal authorized to perform this action on the resource? - `"context"`: Specifies additional context that can be used to make more granular authorization decisions. -- `"entities"`: Specifies the list of entities and their associated attributes that - Verified Permissions can examine when evaluating the policies. +- `"entities"`: Specifies the list of resources and principals and their associated + attributes that Verified Permissions can examine when evaluating the policies. You can + include only principal and resource entities in this parameter; you can't include actions. + You must specify actions in the schema. - `"principal"`: Specifies the principal for which the authorization decision is to be made. - `"resource"`: Specifies the resource for which the authorization decision is to be made. """ @@ -735,8 +739,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys authorized to perform this action on the specified resource. - `"context"`: Specifies additional context that can be used to make more granular authorization decisions. -- `"entities"`: Specifies the list of entities and their associated attributes that - Verified Permissions can examine when evaluating the policies. +- `"entities"`: Specifies the list of resources and principals and their associated + attributes that Verified Permissions can examine when evaluating the policies. You can + include only principal and resource entities in this parameter; you can't include actions. + You must specify actions in the schema. - `"identityToken"`: Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.